hexsha (string, 40-40) | size (int64, 2-1.02M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, 4-245) | max_stars_repo_name (string, 6-130) | max_stars_repo_head_hexsha (string, 40-40) | max_stars_repo_licenses (sequence, 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24-24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24-24, nullable) | max_issues_repo_path (string, 4-245) | max_issues_repo_name (string, 6-130) | max_issues_repo_head_hexsha (string, 40-40) | max_issues_repo_licenses (sequence, 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24-24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24-24, nullable) | max_forks_repo_path (string, 4-245) | max_forks_repo_name (string, 6-130) | max_forks_repo_head_hexsha (string, 40-40) | max_forks_repo_licenses (sequence, 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24-24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24-24, nullable) | content (string, 2-1.02M) | avg_line_length (float64, 1-958k) | max_line_length (int64, 1-987k) | alphanum_fraction (float64, 0-1) | content_no_comment (string, 0-1.01M) | is_comment_constant_removed (bool, 2 classes) | is_sharp_comment_removed (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c4a0fbb4cfee7ec85a999391bf78a422dea764f | 1,513 | py | Python | get_image_from_video.py | matiji66/face-card-machine | bb466a4e06815869ff801ae50f044bf29e53f20d | [
"Apache-2.0"
] | 39 | 2018-09-29T02:57:09.000Z | 2021-04-12T13:45:21.000Z | get_image_from_video.py | a543713743/face-attendance-machine | bb466a4e06815869ff801ae50f044bf29e53f20d | [
"Apache-2.0"
] | 2 | 2019-03-26T13:50:29.000Z | 2021-03-23T09:49:43.000Z | get_image_from_video.py | a543713743/face-attendance-machine | bb466a4e06815869ff801ae50f044bf29e53f20d | [
"Apache-2.0"
] | 20 | 2018-06-27T13:55:12.000Z | 2022-03-26T15:08:01.000Z | import cv2
# This is a demo of running face recognition on a video file and saving the results to a new video file.
#
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Open the input movie file
# input_movie = cv2.VideoCapture("outpy1525941951.7225914.avi")
# length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))
# Create an output movie file (make sure resolution/frame rate matches input video!)
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# output_movie = cv2.VideoWriter('output.avi', fourcc, 29.97, (640, 360))
import os
# os.listdir returns bare file names, so join them with the directory
# before testing and opening them
files = [os.path.join("./videos", path) for path in os.listdir("./videos")
         if path.endswith(".avi") and os.path.isfile(os.path.join("./videos", path))]
frame_number = 0
for avi in files:
input_movie = cv2.VideoCapture(avi)
length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))
while True:
# Grab a single frame of video
ret, frame = input_movie.read()
frame_number += 1
# Quit when the input video file ends
if not ret:
break
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_frame = frame[:, :, ::-1]
cv2.imwrite("images/image_{}.jpg".format(frame_number), frame)
# All done!
input_movie.release()
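# A minimal variation (not part of the original script) that would keep only
# every 10th frame instead of all of them; the sampling step is illustrative:
#
#     if ret and frame_number % 10 == 0:
#         cv2.imwrite("images/image_{}.jpg".format(frame_number), frame)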
| 38.794872 | 110 | 0.702578 | import cv2
import os
files = [os.path.join("./videos", path) for path in os.listdir("./videos")
         if path.endswith(".avi") and os.path.isfile(os.path.join("./videos", path))]
frame_number = 0
for avi in files:
input_movie = cv2.VideoCapture(avi)
length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))
while True:
ret, frame = input_movie.read()
frame_number += 1
if not ret:
break
rgb_frame = frame[:, :, ::-1]
cv2.imwrite("images/image_{}.jpg".format(frame_number), frame)
input_movie.release()
| true | true |
1c4a0fd0c26a592c12df47d7e444bcfc3897cded | 37,709 | py | Python | sympy/functions/combinatorial/factorials.py | jainachal03/sympy | 7dbc2f49370b31ac6960524ea7e5444e2e5a50d5 | [
"BSD-3-Clause"
] | 1 | 2018-11-20T11:40:30.000Z | 2018-11-20T11:40:30.000Z | sympy/functions/combinatorial/factorials.py | jainachal03/sympy | 7dbc2f49370b31ac6960524ea7e5444e2e5a50d5 | [
"BSD-3-Clause"
] | 14 | 2018-02-08T10:11:03.000Z | 2019-04-16T10:32:46.000Z | sympy/functions/combinatorial/factorials.py | jainachal03/sympy | 7dbc2f49370b31ac6960524ea7e5444e2e5a50d5 | [
"BSD-3-Clause"
] | 1 | 2020-09-09T20:41:34.000Z | 2020-09-09T20:41:34.000Z | from typing import List
from functools import reduce
from sympy.core import S, sympify, Dummy, Mod
from sympy.core.cache import cacheit
from sympy.core.function import Function, ArgumentIndexError, PoleError
from sympy.core.logic import fuzzy_and
from sympy.core.numbers import Integer, pi, I
from sympy.core.relational import Eq
from sympy.external.gmpy import HAS_GMPY, gmpy
from sympy.ntheory import sieve
from sympy.polys.polytools import Poly
from math import sqrt as _sqrt
class CombinatorialFunction(Function):
"""Base class for combinatorial functions. """
def _eval_simplify(self, **kwargs):
from sympy.simplify.combsimp import combsimp
# combinatorial function with non-integer arguments is
# automatically passed to gammasimp
expr = combsimp(self)
measure = kwargs['measure']
if measure(expr) <= kwargs['ratio']*measure(self):
return expr
return self
###############################################################################
######################## FACTORIAL and MULTI-FACTORIAL ########################
###############################################################################
class factorial(CombinatorialFunction):
r"""Implementation of factorial function over nonnegative integers.
By convention (consistent with the gamma function and the binomial
coefficients), factorial of a negative integer is complex infinity.
The factorial is very important in combinatorics where it gives
the number of ways in which `n` objects can be permuted. It also
arises in calculus, probability, number theory, etc.
    The factorial is closely related to the gamma function; in fact,
    `n! = gamma(n+1)` for nonnegative integers. Rewrites of this
    kind are very useful for combinatorial simplification.
    Computation of the factorial is done using two algorithms. For
    small arguments a precomputed lookup table is used. For larger
    input the Prime-Swing algorithm is used. It is the fastest known
    algorithm and computes `n!` via prime factorization of a special
    class of numbers, called here the 'Swing Numbers'.
Examples
========
>>> from sympy import Symbol, factorial, S
>>> n = Symbol('n', integer=True)
>>> factorial(0)
1
>>> factorial(7)
5040
>>> factorial(-2)
zoo
>>> factorial(n)
factorial(n)
>>> factorial(2*n)
factorial(2*n)
>>> factorial(S(1)/2)
factorial(1/2)
See Also
========
factorial2, RisingFactorial, FallingFactorial
"""
def fdiff(self, argindex=1):
from sympy.functions.special.gamma_functions import (gamma, polygamma)
if argindex == 1:
return gamma(self.args[0] + 1)*polygamma(0, self.args[0] + 1)
else:
raise ArgumentIndexError(self, argindex)
_small_swing = [
1, 1, 1, 3, 3, 15, 5, 35, 35, 315, 63, 693, 231, 3003, 429, 6435, 6435, 109395,
12155, 230945, 46189, 969969, 88179, 2028117, 676039, 16900975, 1300075,
35102025, 5014575, 145422675, 9694845, 300540195, 300540195
]
_small_factorials = [] # type: List[int]
@classmethod
def _swing(cls, n):
if n < 33:
return cls._small_swing[n]
else:
N, primes = int(_sqrt(n)), []
for prime in sieve.primerange(3, N + 1):
p, q = 1, n
while True:
q //= prime
if q > 0:
if q & 1 == 1:
p *= prime
else:
break
if p > 1:
primes.append(p)
for prime in sieve.primerange(N + 1, n//3 + 1):
if (n // prime) & 1 == 1:
primes.append(prime)
L_product = R_product = 1
for prime in sieve.primerange(n//2 + 1, n + 1):
L_product *= prime
for prime in primes:
R_product *= prime
return L_product*R_product
@classmethod
def _recursive(cls, n):
if n < 2:
return 1
else:
return (cls._recursive(n//2)**2)*cls._swing(n)
@classmethod
def eval(cls, n):
n = sympify(n)
if n.is_Number:
if n.is_zero:
return S.One
elif n is S.Infinity:
return S.Infinity
elif n.is_Integer:
if n.is_negative:
return S.ComplexInfinity
else:
n = n.p
if n < 20:
if not cls._small_factorials:
result = 1
for i in range(1, 20):
result *= i
cls._small_factorials.append(result)
result = cls._small_factorials[n-1]
# GMPY factorial is faster, use it when available
elif HAS_GMPY:
result = gmpy.fac(n)
else:
bits = bin(n).count('1')
result = cls._recursive(n)*2**(n - bits)
return Integer(result)
def _facmod(self, n, q):
res, N = 1, int(_sqrt(n))
# Exponent of prime p in n! is e_p(n) = [n/p] + [n/p**2] + ...
# for p > sqrt(n), e_p(n) < sqrt(n), the primes with [n/p] = m,
# occur consecutively and are grouped together in pw[m] for
# simultaneous exponentiation at a later stage
pw = [1]*N
m = 2 # to initialize the if condition below
for prime in sieve.primerange(2, n + 1):
if m > 1:
m, y = 0, n // prime
while y:
m += y
y //= prime
if m < N:
pw[m] = pw[m]*prime % q
else:
res = res*pow(prime, m, q) % q
for ex, bs in enumerate(pw):
if ex == 0 or bs == 1:
continue
if bs == 0:
return 0
res = res*pow(bs, ex, q) % q
return res
def _eval_Mod(self, q):
n = self.args[0]
if n.is_integer and n.is_nonnegative and q.is_integer:
aq = abs(q)
d = aq - n
if d.is_nonpositive:
return S.Zero
else:
isprime = aq.is_prime
if d == 1:
# Apply Wilson's theorem (if a natural number n > 1
# is a prime number, then (n-1)! = -1 mod n) and
# its inverse (if n > 4 is a composite number, then
# (n-1)! = 0 mod n)
if isprime:
return -1 % q
elif isprime is False and (aq - 6).is_nonnegative:
return S.Zero
elif n.is_Integer and q.is_Integer:
n, d, aq = map(int, (n, d, aq))
if isprime and (d - 1 < n):
fc = self._facmod(d - 1, aq)
fc = pow(fc, aq - 2, aq)
if d%2:
fc = -fc
else:
fc = self._facmod(n, aq)
return fc % q
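    # Quick numeric sanity check of the Wilson's theorem branch above (values
    # verified by hand): factorial(4) % 5 == 24 % 5 == 4 == -1 % 5 since 5 is
    # prime, while factorial(5) % 6 == 120 % 6 == 0 since 6 > 4 is composite.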
def _eval_rewrite_as_gamma(self, n, piecewise=True, **kwargs):
from sympy.functions.special.gamma_functions import gamma
return gamma(n + 1)
def _eval_rewrite_as_Product(self, n, **kwargs):
from sympy.concrete.products import Product
if n.is_nonnegative and n.is_integer:
i = Dummy('i', integer=True)
return Product(i, (i, 1, n))
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_positive(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_even(self):
x = self.args[0]
if x.is_integer and x.is_nonnegative:
return (x - 2).is_nonnegative
def _eval_is_composite(self):
x = self.args[0]
if x.is_integer and x.is_nonnegative:
return (x - 3).is_nonnegative
def _eval_is_real(self):
x = self.args[0]
if x.is_nonnegative or x.is_noninteger:
return True
def _eval_as_leading_term(self, x, logx=None, cdir=0):
arg = self.args[0].as_leading_term(x)
arg0 = arg.subs(x, 0)
if arg0.is_zero:
return S.One
elif not arg0.is_infinite:
return self.func(arg)
raise PoleError("Cannot expand %s around 0" % (self))
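# A standalone sketch (not part of SymPy's API; the helper name is made up)
# of Legendre's formula relied on by ``factorial._facmod`` above: the
# exponent of a prime p in n! is e_p(n) = [n/p] + [n/p**2] + ...
def _legendre_exponent_sketch(n, p):
    """Return the exponent of prime p in n!; e.g. the exponent of 2 in 10! is 8."""
    e = 0
    while n:
        n //= p
        e += n
    return e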
class MultiFactorial(CombinatorialFunction):
pass
class subfactorial(CombinatorialFunction):
r"""The subfactorial counts the derangements of n items and is
defined for non-negative integers as:
.. math:: !n = \begin{cases} 1 & n = 0 \\ 0 & n = 1 \\
(n-1)(!(n-1) + !(n-2)) & n > 1 \end{cases}
It can also be written as ``int(round(n!/exp(1)))`` but the
recursive definition with caching is implemented for this function.
An interesting analytic expression is the following [2]_
.. math:: !x = \Gamma(x + 1, -1)/e
    which is valid for non-negative integers `x`. The above formula
    is not very useful in the case of non-integers. :math:`\Gamma(x + 1, -1)` is
    single-valued only for integral arguments `x`; elsewhere on the positive
    real axis it has an infinite number of branches, none of which are real.
References
==========
.. [1] https://en.wikipedia.org/wiki/Subfactorial
.. [2] http://mathworld.wolfram.com/Subfactorial.html
Examples
========
>>> from sympy import subfactorial
>>> from sympy.abc import n
>>> subfactorial(n + 1)
subfactorial(n + 1)
>>> subfactorial(5)
44
See Also
========
sympy.functions.combinatorial.factorials.factorial,
sympy.utilities.iterables.generate_derangements,
sympy.functions.special.gamma_functions.uppergamma
"""
@classmethod
@cacheit
def _eval(self, n):
if not n:
return S.One
elif n == 1:
return S.Zero
else:
z1, z2 = 1, 0
for i in range(2, n + 1):
z1, z2 = z2, (i - 1)*(z2 + z1)
return z2
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg.is_Integer and arg.is_nonnegative:
return cls._eval(arg)
elif arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
def _eval_is_even(self):
if self.args[0].is_odd and self.args[0].is_nonnegative:
return True
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_rewrite_as_factorial(self, arg, **kwargs):
from sympy.concrete.summations import summation
i = Dummy('i')
f = S.NegativeOne**i / factorial(i)
return factorial(arg) * summation(f, (i, 0, arg))
def _eval_rewrite_as_gamma(self, arg, piecewise=True, **kwargs):
from sympy.functions.elementary.exponential import exp
from sympy.functions.special.gamma_functions import (gamma, lowergamma)
return (S.NegativeOne**(arg + 1)*exp(-I*pi*arg)*lowergamma(arg + 1, -1)
+ gamma(arg + 1))*exp(-1)
def _eval_rewrite_as_uppergamma(self, arg, **kwargs):
from sympy.functions.special.gamma_functions import uppergamma
return uppergamma(arg + 1, -1)/S.Exp1
def _eval_is_nonnegative(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_odd(self):
if self.args[0].is_even and self.args[0].is_nonnegative:
return True
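# A standalone sketch (not part of SymPy's API; the helper name is made up)
# of the recurrence from the docstring above, !n = (n-1)*(!(n-1) + !(n-2)):
def _derangements_sketch(n):
    """Return !n for a nonnegative int n; e.g. _derangements_sketch(5) == 44."""
    a, b = 1, 0  # !0 and !1
    for i in range(2, n + 1):
        a, b = b, (i - 1)*(b + a)
    return b if n else a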
class factorial2(CombinatorialFunction):
r"""The double factorial `n!!`, not to be confused with `(n!)!`
The double factorial is defined for nonnegative integers and for odd
negative integers as:
.. math:: n!! = \begin{cases} 1 & n = 0 \\
n(n-2)(n-4) \cdots 1 & n\ \text{positive odd} \\
n(n-2)(n-4) \cdots 2 & n\ \text{positive even} \\
(n+2)!!/(n+2) & n\ \text{negative odd} \end{cases}
References
==========
.. [1] https://en.wikipedia.org/wiki/Double_factorial
Examples
========
>>> from sympy import factorial2, var
>>> n = var('n')
>>> n
n
>>> factorial2(n + 1)
factorial2(n + 1)
>>> factorial2(5)
15
>>> factorial2(-1)
1
>>> factorial2(-5)
1/3
See Also
========
factorial, RisingFactorial, FallingFactorial
"""
@classmethod
def eval(cls, arg):
# TODO: extend this to complex numbers?
if arg.is_Number:
if not arg.is_Integer:
raise ValueError("argument must be nonnegative integer "
"or negative odd integer")
# This implementation is faster than the recursive one
# It also avoids "maximum recursion depth exceeded" runtime error
if arg.is_nonnegative:
if arg.is_even:
k = arg / 2
return 2**k * factorial(k)
return factorial(arg) / factorial2(arg - 1)
if arg.is_odd:
return arg*(S.NegativeOne)**((1 - arg)/2) / factorial2(-arg)
raise ValueError("argument must be nonnegative integer "
"or negative odd integer")
def _eval_is_even(self):
# Double factorial is even for every positive even input
n = self.args[0]
if n.is_integer:
if n.is_odd:
return False
if n.is_even:
if n.is_positive:
return True
if n.is_zero:
return False
def _eval_is_integer(self):
# Double factorial is an integer for every nonnegative input, and for
# -1 and -3
n = self.args[0]
if n.is_integer:
if (n + 1).is_nonnegative:
return True
if n.is_odd:
return (n + 3).is_nonnegative
def _eval_is_odd(self):
# Double factorial is odd for every odd input not smaller than -3, and
# for 0
n = self.args[0]
if n.is_odd:
return (n + 3).is_nonnegative
if n.is_even:
if n.is_positive:
return False
if n.is_zero:
return True
def _eval_is_positive(self):
# Double factorial is positive for every nonnegative input, and for
        # every odd negative input which is of the form -1-4k for a
        # nonnegative integer k
n = self.args[0]
if n.is_integer:
if (n + 1).is_nonnegative:
return True
if n.is_odd:
return ((n + 1) / 2).is_even
def _eval_rewrite_as_gamma(self, n, piecewise=True, **kwargs):
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.gamma_functions import gamma
return 2**(n/2)*gamma(n/2 + 1) * Piecewise((1, Eq(Mod(n, 2), 0)),
(sqrt(2/pi), Eq(Mod(n, 2), 1)))
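# A standalone sketch (not part of SymPy's API; the helper name is made up)
# of the positive branch of the definition above: n!! multiplies every
# second integer down from n, e.g. 7!! == 105 and 8!! == 384.
def _double_factorial_sketch(n):
    """Return n!! for a nonnegative int n."""
    result = 1
    while n > 1:
        result *= n
        n -= 2
    return result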
###############################################################################
######################## RISING and FALLING FACTORIALS ########################
###############################################################################
class RisingFactorial(CombinatorialFunction):
r"""
Rising factorial (also called Pochhammer symbol) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by:
.. math:: rf(x,k) = x \cdot (x+1) \cdots (x+k-1)
    where `x` can be an arbitrary expression and `k` is an integer. For
more information check "Concrete mathematics" by Graham, pp. 66
or visit http://mathworld.wolfram.com/RisingFactorial.html page.
When `x` is a Poly instance of degree >= 1 with a single variable,
`rf(x,k) = x(y) \cdot x(y+1) \cdots x(y+k-1)`, where `y` is the
variable of `x`. This is as described in Peter Paule, "Greatest
Factorial Factorization and Symbolic Summation", Journal of
Symbolic Computation, vol. 20, pp. 235-268, 1995.
Examples
========
>>> from sympy import rf, Poly
>>> from sympy.abc import x
>>> rf(x, 0)
1
>>> rf(1, 5)
120
>>> rf(x, 5) == x*(1 + x)*(2 + x)*(3 + x)*(4 + x)
True
>>> rf(Poly(x**3, x), 2)
Poly(x**6 + 3*x**5 + 3*x**4 + x**3, x, domain='ZZ')
Rewriting is complicated unless the relationship between
the arguments is known, but rising factorial can
be rewritten in terms of gamma, factorial and binomial
and falling factorial.
>>> from sympy import Symbol, factorial, ff, binomial, gamma
>>> n = Symbol('n', integer=True, positive=True)
>>> R = rf(n, n + 2)
>>> for i in (rf, ff, factorial, binomial, gamma):
... R.rewrite(i)
...
RisingFactorial(n, n + 2)
FallingFactorial(2*n + 1, n + 2)
factorial(2*n + 1)/factorial(n - 1)
binomial(2*n + 1, n + 2)*factorial(n + 2)
gamma(2*n + 2)/gamma(n)
See Also
========
factorial, factorial2, FallingFactorial
References
==========
.. [1] https://en.wikipedia.org/wiki/Pochhammer_symbol
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN or k is S.NaN:
return S.NaN
elif x is S.One:
return factorial(k)
elif k.is_Integer:
if k.is_zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
if isinstance(x, Poly):
gens = x.gens
                            if len(gens) != 1:
raise ValueError("rf only defined for "
"polynomials on one generator")
else:
return reduce(lambda r, i:
r*(x.shift(i)),
range(0, int(k)), 1)
else:
return reduce(lambda r, i: r*(x + i),
range(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
if isinstance(x, Poly):
gens = x.gens
                            if len(gens) != 1:
raise ValueError("rf only defined for "
"polynomials on one generator")
else:
return 1/reduce(lambda r, i:
r*(x.shift(-i)),
range(1, abs(int(k)) + 1), 1)
else:
return 1/reduce(lambda r, i:
r*(x - i),
range(1, abs(int(k)) + 1), 1)
if k.is_integer == False:
if x.is_integer and x.is_negative:
return S.Zero
def _eval_rewrite_as_gamma(self, x, k, piecewise=True, **kwargs):
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.gamma_functions import gamma
if not piecewise:
if (x <= 0) == True:
return S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1)
return gamma(x + k) / gamma(x)
return Piecewise(
(gamma(x + k) / gamma(x), x > 0),
(S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1), True))
def _eval_rewrite_as_FallingFactorial(self, x, k, **kwargs):
return FallingFactorial(x + k - 1, k)
def _eval_rewrite_as_factorial(self, x, k, **kwargs):
from sympy.functions.elementary.piecewise import Piecewise
if x.is_integer and k.is_integer:
return Piecewise(
(factorial(k + x - 1)/factorial(x - 1), x > 0),
(S.NegativeOne**k*factorial(-x)/factorial(-k - x), True))
def _eval_rewrite_as_binomial(self, x, k, **kwargs):
if k.is_integer:
return factorial(k) * binomial(x + k - 1, k)
def _eval_rewrite_as_tractable(self, x, k, limitvar=None, **kwargs):
from sympy.functions.special.gamma_functions import gamma
if limitvar:
k_lim = k.subs(limitvar, S.Infinity)
if k_lim is S.Infinity:
return (gamma(x + k).rewrite('tractable', deep=True) / gamma(x))
elif k_lim is S.NegativeInfinity:
return (S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1).rewrite('tractable', deep=True))
return self.rewrite(gamma).rewrite('tractable', deep=True)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
class FallingFactorial(CombinatorialFunction):
r"""
Falling factorial (related to rising factorial) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by
.. math:: ff(x,k) = x \cdot (x-1) \cdots (x-k+1)
    where `x` can be an arbitrary expression and `k` is an integer. For
more information check "Concrete mathematics" by Graham, pp. 66
or visit http://mathworld.wolfram.com/FallingFactorial.html page.
When `x` is a Poly instance of degree >= 1 with single variable,
`ff(x,k) = x(y) \cdot x(y-1) \cdots x(y-k+1)`, where `y` is the
variable of `x`. This is as described in Peter Paule, "Greatest
Factorial Factorization and Symbolic Summation", Journal of
Symbolic Computation, vol. 20, pp. 235-268, 1995.
    Examples
    ========
    >>> from sympy import ff, Poly, Symbol
>>> from sympy.abc import x
>>> n = Symbol('n', integer=True)
>>> ff(x, 0)
1
>>> ff(5, 5)
120
>>> ff(x, 5) == x*(x - 1)*(x - 2)*(x - 3)*(x - 4)
True
>>> ff(Poly(x**2, x), 2)
Poly(x**4 - 2*x**3 + x**2, x, domain='ZZ')
>>> ff(n, n)
factorial(n)
Rewriting is complicated unless the relationship between
the arguments is known, but falling factorial can
be rewritten in terms of gamma, factorial and binomial
and rising factorial.
>>> from sympy import factorial, rf, gamma, binomial, Symbol
>>> n = Symbol('n', integer=True, positive=True)
>>> F = ff(n, n - 2)
>>> for i in (rf, ff, factorial, binomial, gamma):
... F.rewrite(i)
...
RisingFactorial(3, n - 2)
FallingFactorial(n, n - 2)
factorial(n)/2
binomial(n, n - 2)*factorial(n - 2)
gamma(n + 1)/2
See Also
========
factorial, factorial2, RisingFactorial
References
==========
.. [1] http://mathworld.wolfram.com/FallingFactorial.html
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN or k is S.NaN:
return S.NaN
elif k.is_integer and x == k:
return factorial(x)
elif k.is_Integer:
if k.is_zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
if isinstance(x, Poly):
gens = x.gens
                            if len(gens) != 1:
raise ValueError("ff only defined for "
"polynomials on one generator")
else:
return reduce(lambda r, i:
r*(x.shift(-i)),
range(0, int(k)), 1)
else:
return reduce(lambda r, i: r*(x - i),
range(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
if isinstance(x, Poly):
gens = x.gens
                            if len(gens) != 1:
                                raise ValueError("ff only defined for "
"polynomials on one generator")
else:
return 1/reduce(lambda r, i:
r*(x.shift(i)),
range(1, abs(int(k)) + 1), 1)
else:
return 1/reduce(lambda r, i: r*(x + i),
range(1, abs(int(k)) + 1), 1)
def _eval_rewrite_as_gamma(self, x, k, piecewise=True, **kwargs):
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.gamma_functions import gamma
if not piecewise:
if (x < 0) == True:
return S.NegativeOne**k*gamma(k - x) / gamma(-x)
return gamma(x + 1) / gamma(x - k + 1)
return Piecewise(
(gamma(x + 1) / gamma(x - k + 1), x >= 0),
(S.NegativeOne**k*gamma(k - x) / gamma(-x), True))
def _eval_rewrite_as_RisingFactorial(self, x, k, **kwargs):
return rf(x - k + 1, k)
def _eval_rewrite_as_binomial(self, x, k, **kwargs):
if k.is_integer:
return factorial(k) * binomial(x, k)
def _eval_rewrite_as_factorial(self, x, k, **kwargs):
from sympy.functions.elementary.piecewise import Piecewise
if x.is_integer and k.is_integer:
return Piecewise(
(factorial(x)/factorial(-k + x), x >= 0),
(S.NegativeOne**k*factorial(k - x - 1)/factorial(-x - 1), True))
def _eval_rewrite_as_tractable(self, x, k, limitvar=None, **kwargs):
from sympy.functions.special.gamma_functions import gamma
if limitvar:
k_lim = k.subs(limitvar, S.Infinity)
if k_lim is S.Infinity:
return (S.NegativeOne**k*gamma(k - x).rewrite('tractable', deep=True) / gamma(-x))
elif k_lim is S.NegativeInfinity:
return (gamma(x + 1) / gamma(x - k + 1).rewrite('tractable', deep=True))
return self.rewrite(gamma).rewrite('tractable', deep=True)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
rf = RisingFactorial
ff = FallingFactorial
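# Standalone sketches (not part of SymPy's API; the helper names are made up)
# of the integer-k products defined above; they illustrate the identity
# ff(x, k) == rf(x - k + 1, k) used by the rewrite methods.
def _rf_sketch(x, k):
    """rf(x, k) = x*(x + 1)*...*(x + k - 1) for a nonnegative int k."""
    result = 1
    for i in range(k):
        result *= x + i
    return result
def _ff_sketch(x, k):
    """ff(x, k) = x*(x - 1)*...*(x - k + 1) for a nonnegative int k."""
    result = 1
    for i in range(k):
        result *= x - i
    return result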
###############################################################################
########################### BINOMIAL COEFFICIENTS #############################
###############################################################################
class binomial(CombinatorialFunction):
r"""Implementation of the binomial coefficient. It can be defined
in two ways depending on its desired interpretation:
.. math:: \binom{n}{k} = \frac{n!}{k!(n-k)!}\ \text{or}\
\binom{n}{k} = \frac{ff(n, k)}{k!}
First, in a strict combinatorial sense it defines the
number of ways we can choose `k` elements from a set of
`n` elements. In this case both arguments are nonnegative
integers and binomial is computed using an efficient
algorithm based on prime factorization.
The other definition is generalization for arbitrary `n`,
however `k` must also be nonnegative. This case is very
useful when evaluating summations.
    For the sake of convenience, for a negative integer `k` this function
    returns zero regardless of the value of the other argument.
To expand the binomial when `n` is a symbol, use either
``expand_func()`` or ``expand(func=True)``. The former will keep
the polynomial in factored form while the latter will expand the
polynomial itself. See examples for details.
Examples
========
>>> from sympy import Symbol, Rational, binomial, expand_func
>>> n = Symbol('n', integer=True, positive=True)
>>> binomial(15, 8)
6435
>>> binomial(n, -1)
0
Rows of Pascal's triangle can be generated with the binomial function:
>>> for N in range(8):
... print([binomial(N, i) for i in range(N + 1)])
...
[1]
[1, 1]
[1, 2, 1]
[1, 3, 3, 1]
[1, 4, 6, 4, 1]
[1, 5, 10, 10, 5, 1]
[1, 6, 15, 20, 15, 6, 1]
[1, 7, 21, 35, 35, 21, 7, 1]
As can a given diagonal, e.g. the 4th diagonal:
>>> N = -4
>>> [binomial(N, i) for i in range(1 - N)]
[1, -4, 10, -20, 35]
>>> binomial(Rational(5, 4), 3)
-5/128
>>> binomial(Rational(-5, 4), 3)
-195/128
>>> binomial(n, 3)
binomial(n, 3)
>>> binomial(n, 3).expand(func=True)
n**3/6 - n**2/2 + n/3
>>> expand_func(binomial(n, 3))
n*(n - 2)*(n - 1)/6
References
==========
.. [1] https://www.johndcook.com/blog/binomial_coefficients/
"""
def fdiff(self, argindex=1):
from sympy.functions.special.gamma_functions import polygamma
if argindex == 1:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/01/
n, k = self.args
return binomial(n, k)*(polygamma(0, n + 1) - \
polygamma(0, n - k + 1))
elif argindex == 2:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/02/
n, k = self.args
return binomial(n, k)*(polygamma(0, n - k + 1) - \
polygamma(0, k + 1))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def _eval(self, n, k):
# n.is_Number and k.is_Integer and k != 1 and n != k
if k.is_Integer:
if n.is_Integer and n >= 0:
n, k = int(n), int(k)
if k > n:
return S.Zero
elif k > n // 2:
k = n - k
if HAS_GMPY:
return Integer(gmpy.bincoef(n, k))
d, result = n - k, 1
for i in range(1, k + 1):
d += 1
result = result * d // i
return Integer(result)
else:
d, result = n - k, 1
for i in range(1, k + 1):
d += 1
result *= d
result /= i
return result
@classmethod
def eval(cls, n, k):
n, k = map(sympify, (n, k))
d = n - k
n_nonneg, n_isint = n.is_nonnegative, n.is_integer
if k.is_zero or ((n_nonneg or n_isint is False)
and d.is_zero):
return S.One
if (k - 1).is_zero or ((n_nonneg or n_isint is False)
and (d - 1).is_zero):
return n
if k.is_integer:
if k.is_negative or (n_nonneg and n_isint and d.is_negative):
return S.Zero
elif n.is_number:
res = cls._eval(n, k)
return res.expand(basic=True) if res else res
elif n_nonneg is False and n_isint:
# a special case when binomial evaluates to complex infinity
return S.ComplexInfinity
elif k.is_number:
from sympy.functions.special.gamma_functions import gamma
return gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1))
def _eval_Mod(self, q):
n, k = self.args
if any(x.is_integer is False for x in (n, k, q)):
raise ValueError("Integers expected for binomial Mod")
if all(x.is_Integer for x in (n, k, q)):
n, k = map(int, (n, k))
aq, res = abs(q), 1
# handle negative integers k or n
if k < 0:
return S.Zero
if n < 0:
n = -n + k - 1
res = -1 if k%2 else 1
# non negative integers k and n
if k > n:
return S.Zero
isprime = aq.is_prime
aq = int(aq)
if isprime:
if aq < n:
# use Lucas Theorem
N, K = n, k
while N or K:
res = res*binomial(N % aq, K % aq) % aq
N, K = N // aq, K // aq
else:
# use Factorial Modulo
d = n - k
if k > d:
k, d = d, k
kf = 1
for i in range(2, k + 1):
kf = kf*i % aq
df = kf
for i in range(k + 1, d + 1):
df = df*i % aq
res *= df
for i in range(d + 1, n + 1):
res = res*i % aq
res *= pow(kf*df % aq, aq - 2, aq)
res %= aq
else:
# Binomial Factorization is performed by calculating the
# exponents of primes <= n in `n! /(k! (n - k)!)`,
# for non-negative integers n and k. As the exponent of
# prime in n! is e_p(n) = [n/p] + [n/p**2] + ...
# the exponent of prime in binomial(n, k) would be
# e_p(n) - e_p(k) - e_p(n - k)
M = int(_sqrt(n))
for prime in sieve.primerange(2, n + 1):
if prime > n - k:
res = res*prime % aq
elif prime > n // 2:
continue
elif prime > M:
if n % prime < k % prime:
res = res*prime % aq
else:
N, K = n, k
exp = a = 0
while N > 0:
a = int((N % prime) < (K % prime + a))
N, K = N // prime, K // prime
exp += a
if exp > 0:
res *= pow(prime, exp, aq)
res %= aq
return S(res % q)
def _eval_expand_func(self, **hints):
"""
Function to expand binomial(n, k) when m is positive integer
Also,
n is self.args[0] and k is self.args[1] while using binomial(n, k)
"""
n = self.args[0]
if n.is_Number:
return binomial(*self.args)
k = self.args[1]
if (n-k).is_Integer:
k = n - k
if k.is_Integer:
if k.is_zero:
return S.One
elif k.is_negative:
return S.Zero
else:
n, result = self.args[0], 1
for i in range(1, k + 1):
result *= n - k + i
result /= i
return result
else:
return binomial(*self.args)
def _eval_rewrite_as_factorial(self, n, k, **kwargs):
return factorial(n)/(factorial(k)*factorial(n - k))
def _eval_rewrite_as_gamma(self, n, k, piecewise=True, **kwargs):
from sympy.functions.special.gamma_functions import gamma
return gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1))
def _eval_rewrite_as_tractable(self, n, k, limitvar=None, **kwargs):
return self._eval_rewrite_as_gamma(n, k).rewrite('tractable')
def _eval_rewrite_as_FallingFactorial(self, n, k, **kwargs):
if k.is_integer:
return ff(n, k) / factorial(k)
def _eval_is_integer(self):
n, k = self.args
if n.is_integer and k.is_integer:
return True
elif k.is_integer is False:
return False
def _eval_is_nonnegative(self):
n, k = self.args
if n.is_integer and k.is_integer:
if n.is_nonnegative or k.is_negative or k.is_even:
return True
elif k.is_even is False:
return False
def _eval_as_leading_term(self, x, logx=None, cdir=0):
from sympy.functions.special.gamma_functions import gamma
return self.rewrite(gamma)._eval_as_leading_term(x, logx=logx, cdir=cdir)
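# A standalone sketch (not part of SymPy's API; the helper name is made up)
# of Lucas' theorem as used in ``binomial._eval_Mod`` above: modulo a prime
# p, binomial(n, k) is the product of the binomials of the base-p digits.
def _lucas_binomial_mod_sketch(n, k, p):
    """Return binomial(n, k) % p for a prime p via Lucas' theorem."""
    from math import comb  # Python >= 3.8
    res = 1
    while n or k:
        res = res * comb(n % p, k % p) % p
        n, k = n // p, k // p
    return res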
| 33.972072 | 106 | 0.497971 | from typing import List
from functools import reduce
from sympy.core import S, sympify, Dummy, Mod
from sympy.core.cache import cacheit
from sympy.core.function import Function, ArgumentIndexError, PoleError
from sympy.core.logic import fuzzy_and
from sympy.core.numbers import Integer, pi, I
from sympy.core.relational import Eq
from sympy.external.gmpy import HAS_GMPY, gmpy
from sympy.ntheory import sieve
from sympy.polys.polytools import Poly
from math import sqrt as _sqrt
class CombinatorialFunction(Function):
def _eval_simplify(self, **kwargs):
from sympy.simplify.combsimp import combsimp
expr = combsimp(self)
measure = kwargs['measure']
if measure(expr) <= kwargs['ratio']*measure(self):
return expr
return self
class factorial(CombinatorialFunction):
def fdiff(self, argindex=1):
from sympy.functions.special.gamma_functions import (gamma, polygamma)
if argindex == 1:
return gamma(self.args[0] + 1)*polygamma(0, self.args[0] + 1)
else:
raise ArgumentIndexError(self, argindex)
_small_swing = [
1, 1, 1, 3, 3, 15, 5, 35, 35, 315, 63, 693, 231, 3003, 429, 6435, 6435, 109395,
12155, 230945, 46189, 969969, 88179, 2028117, 676039, 16900975, 1300075,
35102025, 5014575, 145422675, 9694845, 300540195, 300540195
]
_small_factorials = []
@classmethod
def _swing(cls, n):
if n < 33:
return cls._small_swing[n]
else:
N, primes = int(_sqrt(n)), []
for prime in sieve.primerange(3, N + 1):
p, q = 1, n
while True:
q //= prime
if q > 0:
if q & 1 == 1:
p *= prime
else:
break
if p > 1:
primes.append(p)
for prime in sieve.primerange(N + 1, n//3 + 1):
if (n // prime) & 1 == 1:
primes.append(prime)
L_product = R_product = 1
for prime in sieve.primerange(n//2 + 1, n + 1):
L_product *= prime
for prime in primes:
R_product *= prime
return L_product*R_product
@classmethod
def _recursive(cls, n):
if n < 2:
return 1
else:
return (cls._recursive(n//2)**2)*cls._swing(n)
@classmethod
def eval(cls, n):
n = sympify(n)
if n.is_Number:
if n.is_zero:
return S.One
elif n is S.Infinity:
return S.Infinity
elif n.is_Integer:
if n.is_negative:
return S.ComplexInfinity
else:
n = n.p
if n < 20:
if not cls._small_factorials:
result = 1
for i in range(1, 20):
result *= i
cls._small_factorials.append(result)
result = cls._small_factorials[n-1]
elif HAS_GMPY:
result = gmpy.fac(n)
else:
bits = bin(n).count('1')
result = cls._recursive(n)*2**(n - bits)
return Integer(result)
def _facmod(self, n, q):
res, N = 1, int(_sqrt(n))
pw = [1]*N
        m = 2
        for prime in sieve.primerange(2, n + 1):
if m > 1:
m, y = 0, n // prime
while y:
m += y
y //= prime
if m < N:
pw[m] = pw[m]*prime % q
else:
res = res*pow(prime, m, q) % q
for ex, bs in enumerate(pw):
if ex == 0 or bs == 1:
continue
if bs == 0:
return 0
res = res*pow(bs, ex, q) % q
return res
def _eval_Mod(self, q):
n = self.args[0]
if n.is_integer and n.is_nonnegative and q.is_integer:
aq = abs(q)
d = aq - n
if d.is_nonpositive:
return S.Zero
else:
isprime = aq.is_prime
if d == 1:
                    # Apply Wilson's theorem (if a natural number n > 1
                    # is a prime number, then (n-1)! = -1 mod n) and
# its inverse (if n > 4 is a composite number, then
# (n-1)! = 0 mod n)
if isprime:
return -1 % q
elif isprime is False and (aq - 6).is_nonnegative:
return S.Zero
elif n.is_Integer and q.is_Integer:
n, d, aq = map(int, (n, d, aq))
if isprime and (d - 1 < n):
fc = self._facmod(d - 1, aq)
fc = pow(fc, aq - 2, aq)
if d%2:
fc = -fc
else:
fc = self._facmod(n, aq)
return fc % q
def _eval_rewrite_as_gamma(self, n, piecewise=True, **kwargs):
from sympy.functions.special.gamma_functions import gamma
return gamma(n + 1)
def _eval_rewrite_as_Product(self, n, **kwargs):
from sympy.concrete.products import Product
if n.is_nonnegative and n.is_integer:
i = Dummy('i', integer=True)
return Product(i, (i, 1, n))
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_positive(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_even(self):
x = self.args[0]
if x.is_integer and x.is_nonnegative:
return (x - 2).is_nonnegative
def _eval_is_composite(self):
x = self.args[0]
if x.is_integer and x.is_nonnegative:
return (x - 3).is_nonnegative
def _eval_is_real(self):
x = self.args[0]
if x.is_nonnegative or x.is_noninteger:
return True
def _eval_as_leading_term(self, x, logx=None, cdir=0):
arg = self.args[0].as_leading_term(x)
arg0 = arg.subs(x, 0)
if arg0.is_zero:
return S.One
elif not arg0.is_infinite:
return self.func(arg)
raise PoleError("Cannot expand %s around 0" % (self))
class MultiFactorial(CombinatorialFunction):
pass
class subfactorial(CombinatorialFunction):
@classmethod
@cacheit
def _eval(self, n):
if not n:
return S.One
elif n == 1:
return S.Zero
else:
z1, z2 = 1, 0
for i in range(2, n + 1):
z1, z2 = z2, (i - 1)*(z2 + z1)
return z2
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg.is_Integer and arg.is_nonnegative:
return cls._eval(arg)
elif arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
def _eval_is_even(self):
if self.args[0].is_odd and self.args[0].is_nonnegative:
return True
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_rewrite_as_factorial(self, arg, **kwargs):
from sympy.concrete.summations import summation
i = Dummy('i')
f = S.NegativeOne**i / factorial(i)
return factorial(arg) * summation(f, (i, 0, arg))
def _eval_rewrite_as_gamma(self, arg, piecewise=True, **kwargs):
from sympy.functions.elementary.exponential import exp
from sympy.functions.special.gamma_functions import (gamma, lowergamma)
return (S.NegativeOne**(arg + 1)*exp(-I*pi*arg)*lowergamma(arg + 1, -1)
+ gamma(arg + 1))*exp(-1)
def _eval_rewrite_as_uppergamma(self, arg, **kwargs):
from sympy.functions.special.gamma_functions import uppergamma
return uppergamma(arg + 1, -1)/S.Exp1
def _eval_is_nonnegative(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_odd(self):
if self.args[0].is_even and self.args[0].is_nonnegative:
return True
class factorial2(CombinatorialFunction):
@classmethod
def eval(cls, arg):
# TODO: extend this to complex numbers?
if arg.is_Number:
if not arg.is_Integer:
raise ValueError("argument must be nonnegative integer "
"or negative odd integer")
# This implementation is faster than the recursive one
# It also avoids "maximum recursion depth exceeded" runtime error
if arg.is_nonnegative:
if arg.is_even:
k = arg / 2
return 2**k * factorial(k)
return factorial(arg) / factorial2(arg - 1)
if arg.is_odd:
return arg*(S.NegativeOne)**((1 - arg)/2) / factorial2(-arg)
raise ValueError("argument must be nonnegative integer "
"or negative odd integer")
def _eval_is_even(self):
# Double factorial is even for every positive even input
n = self.args[0]
if n.is_integer:
if n.is_odd:
return False
if n.is_even:
if n.is_positive:
return True
if n.is_zero:
return False
def _eval_is_integer(self):
# Double factorial is an integer for every nonnegative input, and for
# -1 and -3
n = self.args[0]
if n.is_integer:
if (n + 1).is_nonnegative:
return True
if n.is_odd:
return (n + 3).is_nonnegative
def _eval_is_odd(self):
# Double factorial is odd for every odd input not smaller than -3, and
# for 0
n = self.args[0]
if n.is_odd:
return (n + 3).is_nonnegative
if n.is_even:
if n.is_positive:
return False
if n.is_zero:
return True
def _eval_is_positive(self):
# Double factorial is positive for every nonnegative input, and for
        # every odd negative input which is of the form -1-4k for a
        # nonnegative integer k
n = self.args[0]
if n.is_integer:
if (n + 1).is_nonnegative:
return True
if n.is_odd:
return ((n + 1) / 2).is_even
def _eval_rewrite_as_gamma(self, n, piecewise=True, **kwargs):
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.gamma_functions import gamma
return 2**(n/2)*gamma(n/2 + 1) * Piecewise((1, Eq(Mod(n, 2), 0)),
(sqrt(2/pi), Eq(Mod(n, 2), 1)))
###############################################################################
######################## RISING and FALLING FACTORIALS ########################
###############################################################################
class RisingFactorial(CombinatorialFunction):
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN or k is S.NaN:
return S.NaN
elif x is S.One:
return factorial(k)
elif k.is_Integer:
if k.is_zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
if isinstance(x, Poly):
gens = x.gens
                            if len(gens) != 1:
raise ValueError("rf only defined for "
"polynomials on one generator")
else:
return reduce(lambda r, i:
r*(x.shift(i)),
range(0, int(k)), 1)
else:
return reduce(lambda r, i: r*(x + i),
range(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
if isinstance(x, Poly):
gens = x.gens
                            if len(gens) != 1:
raise ValueError("rf only defined for "
"polynomials on one generator")
else:
return 1/reduce(lambda r, i:
r*(x.shift(-i)),
range(1, abs(int(k)) + 1), 1)
else:
return 1/reduce(lambda r, i:
r*(x - i),
range(1, abs(int(k)) + 1), 1)
if k.is_integer == False:
if x.is_integer and x.is_negative:
return S.Zero
def _eval_rewrite_as_gamma(self, x, k, piecewise=True, **kwargs):
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.gamma_functions import gamma
if not piecewise:
if (x <= 0) == True:
return S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1)
return gamma(x + k) / gamma(x)
return Piecewise(
(gamma(x + k) / gamma(x), x > 0),
(S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1), True))
def _eval_rewrite_as_FallingFactorial(self, x, k, **kwargs):
return FallingFactorial(x + k - 1, k)
def _eval_rewrite_as_factorial(self, x, k, **kwargs):
from sympy.functions.elementary.piecewise import Piecewise
if x.is_integer and k.is_integer:
return Piecewise(
(factorial(k + x - 1)/factorial(x - 1), x > 0),
(S.NegativeOne**k*factorial(-x)/factorial(-k - x), True))
def _eval_rewrite_as_binomial(self, x, k, **kwargs):
if k.is_integer:
return factorial(k) * binomial(x + k - 1, k)
def _eval_rewrite_as_tractable(self, x, k, limitvar=None, **kwargs):
from sympy.functions.special.gamma_functions import gamma
if limitvar:
k_lim = k.subs(limitvar, S.Infinity)
if k_lim is S.Infinity:
return (gamma(x + k).rewrite('tractable', deep=True) / gamma(x))
elif k_lim is S.NegativeInfinity:
return (S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1).rewrite('tractable', deep=True))
return self.rewrite(gamma).rewrite('tractable', deep=True)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
class FallingFactorial(CombinatorialFunction):
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN or k is S.NaN:
return S.NaN
elif k.is_integer and x == k:
return factorial(x)
elif k.is_Integer:
if k.is_zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
if isinstance(x, Poly):
gens = x.gens
                            if len(gens) != 1:
raise ValueError("ff only defined for "
"polynomials on one generator")
else:
return reduce(lambda r, i:
r*(x.shift(-i)),
range(0, int(k)), 1)
else:
return reduce(lambda r, i: r*(x - i),
range(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
if isinstance(x, Poly):
gens = x.gens
                            if len(gens) != 1:
                                raise ValueError("ff only defined for "
"polynomials on one generator")
else:
return 1/reduce(lambda r, i:
r*(x.shift(i)),
range(1, abs(int(k)) + 1), 1)
else:
return 1/reduce(lambda r, i: r*(x + i),
range(1, abs(int(k)) + 1), 1)
def _eval_rewrite_as_gamma(self, x, k, piecewise=True, **kwargs):
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.gamma_functions import gamma
if not piecewise:
if (x < 0) == True:
return S.NegativeOne**k*gamma(k - x) / gamma(-x)
return gamma(x + 1) / gamma(x - k + 1)
return Piecewise(
(gamma(x + 1) / gamma(x - k + 1), x >= 0),
(S.NegativeOne**k*gamma(k - x) / gamma(-x), True))
def _eval_rewrite_as_RisingFactorial(self, x, k, **kwargs):
return rf(x - k + 1, k)
def _eval_rewrite_as_binomial(self, x, k, **kwargs):
if k.is_integer:
return factorial(k) * binomial(x, k)
def _eval_rewrite_as_factorial(self, x, k, **kwargs):
from sympy.functions.elementary.piecewise import Piecewise
if x.is_integer and k.is_integer:
return Piecewise(
(factorial(x)/factorial(-k + x), x >= 0),
(S.NegativeOne**k*factorial(k - x - 1)/factorial(-x - 1), True))
def _eval_rewrite_as_tractable(self, x, k, limitvar=None, **kwargs):
from sympy.functions.special.gamma_functions import gamma
if limitvar:
k_lim = k.subs(limitvar, S.Infinity)
if k_lim is S.Infinity:
return (S.NegativeOne**k*gamma(k - x).rewrite('tractable', deep=True) / gamma(-x))
elif k_lim is S.NegativeInfinity:
return (gamma(x + 1) / gamma(x - k + 1).rewrite('tractable', deep=True))
return self.rewrite(gamma).rewrite('tractable', deep=True)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
rf = RisingFactorial
ff = FallingFactorial
###############################################################################
########################### BINOMIAL COEFFICIENTS #############################
###############################################################################
class binomial(CombinatorialFunction):
def fdiff(self, argindex=1):
from sympy.functions.special.gamma_functions import polygamma
if argindex == 1:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/01/
n, k = self.args
return binomial(n, k)*(polygamma(0, n + 1) - \
polygamma(0, n - k + 1))
elif argindex == 2:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/02/
n, k = self.args
return binomial(n, k)*(polygamma(0, n - k + 1) - \
polygamma(0, k + 1))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def _eval(self, n, k):
# n.is_Number and k.is_Integer and k != 1 and n != k
if k.is_Integer:
if n.is_Integer and n >= 0:
n, k = int(n), int(k)
if k > n:
return S.Zero
elif k > n // 2:
k = n - k
if HAS_GMPY:
return Integer(gmpy.bincoef(n, k))
d, result = n - k, 1
for i in range(1, k + 1):
d += 1
result = result * d // i
return Integer(result)
else:
d, result = n - k, 1
for i in range(1, k + 1):
d += 1
result *= d
result /= i
return result
@classmethod
def eval(cls, n, k):
n, k = map(sympify, (n, k))
d = n - k
n_nonneg, n_isint = n.is_nonnegative, n.is_integer
if k.is_zero or ((n_nonneg or n_isint is False)
and d.is_zero):
return S.One
if (k - 1).is_zero or ((n_nonneg or n_isint is False)
and (d - 1).is_zero):
return n
if k.is_integer:
if k.is_negative or (n_nonneg and n_isint and d.is_negative):
return S.Zero
elif n.is_number:
res = cls._eval(n, k)
return res.expand(basic=True) if res else res
elif n_nonneg is False and n_isint:
# a special case when binomial evaluates to complex infinity
return S.ComplexInfinity
elif k.is_number:
from sympy.functions.special.gamma_functions import gamma
return gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1))
def _eval_Mod(self, q):
n, k = self.args
if any(x.is_integer is False for x in (n, k, q)):
raise ValueError("Integers expected for binomial Mod")
if all(x.is_Integer for x in (n, k, q)):
n, k = map(int, (n, k))
aq, res = abs(q), 1
# handle negative integers k or n
if k < 0:
return S.Zero
if n < 0:
n = -n + k - 1
res = -1 if k%2 else 1
# non negative integers k and n
if k > n:
return S.Zero
isprime = aq.is_prime
aq = int(aq)
if isprime:
if aq < n:
# use Lucas Theorem
N, K = n, k
while N or K:
res = res*binomial(N % aq, K % aq) % aq
N, K = N // aq, K // aq
else:
# use Factorial Modulo
d = n - k
if k > d:
k, d = d, k
kf = 1
for i in range(2, k + 1):
kf = kf*i % aq
df = kf
for i in range(k + 1, d + 1):
df = df*i % aq
res *= df
for i in range(d + 1, n + 1):
res = res*i % aq
res *= pow(kf*df % aq, aq - 2, aq)
res %= aq
else:
# Binomial Factorization is performed by calculating the
# exponents of primes <= n in `n! /(k! (n - k)!)`,
# for non-negative integers n and k. As the exponent of
# prime in n! is e_p(n) = [n/p] + [n/p**2] + ...
# the exponent of prime in binomial(n, k) would be
# e_p(n) - e_p(k) - e_p(n - k)
M = int(_sqrt(n))
for prime in sieve.primerange(2, n + 1):
if prime > n - k:
res = res*prime % aq
elif prime > n // 2:
continue
elif prime > M:
if n % prime < k % prime:
res = res*prime % aq
else:
N, K = n, k
exp = a = 0
while N > 0:
a = int((N % prime) < (K % prime + a))
N, K = N // prime, K // prime
exp += a
if exp > 0:
res *= pow(prime, exp, aq)
res %= aq
return S(res % q)
def _eval_expand_func(self, **hints):
n = self.args[0]
if n.is_Number:
return binomial(*self.args)
k = self.args[1]
if (n-k).is_Integer:
k = n - k
if k.is_Integer:
if k.is_zero:
return S.One
elif k.is_negative:
return S.Zero
else:
n, result = self.args[0], 1
for i in range(1, k + 1):
result *= n - k + i
result /= i
return result
else:
return binomial(*self.args)
def _eval_rewrite_as_factorial(self, n, k, **kwargs):
return factorial(n)/(factorial(k)*factorial(n - k))
def _eval_rewrite_as_gamma(self, n, k, piecewise=True, **kwargs):
from sympy.functions.special.gamma_functions import gamma
return gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1))
def _eval_rewrite_as_tractable(self, n, k, limitvar=None, **kwargs):
return self._eval_rewrite_as_gamma(n, k).rewrite('tractable')
def _eval_rewrite_as_FallingFactorial(self, n, k, **kwargs):
if k.is_integer:
return ff(n, k) / factorial(k)
def _eval_is_integer(self):
n, k = self.args
if n.is_integer and k.is_integer:
return True
elif k.is_integer is False:
return False
def _eval_is_nonnegative(self):
n, k = self.args
if n.is_integer and k.is_integer:
if n.is_nonnegative or k.is_negative or k.is_even:
return True
elif k.is_even is False:
return False
def _eval_as_leading_term(self, x, logx=None, cdir=0):
from sympy.functions.special.gamma_functions import gamma
return self.rewrite(gamma)._eval_as_leading_term(x, logx=logx, cdir=cdir)
| true | true |
1c4a104cf5b6c4c66a2d343c800f0bc19b3712c1 | 1,120 | py | Python | src/infi/pyutils/misc.py | jasonjorge/infi.asi | 78a4c34a421102f99b959a659cf7303804627d9b | [
"BSD-3-Clause"
] | 1 | 2022-02-12T20:30:55.000Z | 2022-02-12T20:30:55.000Z | src/infi/pyutils/misc.py | jasonjorge/infi.asi | 78a4c34a421102f99b959a659cf7303804627d9b | [
"BSD-3-Clause"
] | 5 | 2015-11-08T14:50:42.000Z | 2020-06-23T14:42:33.000Z | src/infi/pyutils/misc.py | jasonjorge/infi.asi | 78a4c34a421102f99b959a659cf7303804627d9b | [
"BSD-3-Clause"
] | 4 | 2015-02-22T09:06:59.000Z | 2022-02-12T20:30:55.000Z | _NOTHING = object()
def recursive_getattr(obj, attr, default=_NOTHING):
for subattr in attr.split("."):
obj = getattr(obj, subattr, _NOTHING)
if obj is _NOTHING:
if default is not _NOTHING:
return default
raise AttributeError(attr)
return obj
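# Hypothetical usage (the attribute chain and default are illustrative):
#
#     port = recursive_getattr(settings, "network.listener.port", default=8080)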
class Reprify(object):
def __init__(self, original, str=None, repr=None):
super(Reprify, self).__init__()
self._strify__original = original
if repr is None:
repr = str
if str is None:
str = repr
self._strify__str = str
self._strify__repr = repr
def __getattribute__(self, attr):
if attr.startswith('_strify__'):
return super(Reprify, self).__getattribute__(attr)
return getattr(self._strify__original, attr)
def __repr__(self):
if self._strify__repr is not None:
return self._strify__repr
return repr(self._strify__original)
def __str__(self):
if self._strify__str is not None:
return self._strify__str
        return str(self._strify__original)
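# Hypothetical usage: give a wrapped object a custom display string without
# subclassing it (the wrapped object's name is illustrative):
#
#     conn = Reprify(raw_connection, str="<open connection>")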
| 32.941176 | 62 | 0.621429 | _NOTHING = object()
def recursive_getattr(obj, attr, default=_NOTHING):
for subattr in attr.split("."):
obj = getattr(obj, subattr, _NOTHING)
if obj is _NOTHING:
if default is not _NOTHING:
return default
raise AttributeError(attr)
return obj
class Reprify(object):
def __init__(self, original, str=None, repr=None):
super(Reprify, self).__init__()
self._strify__original = original
if repr is None:
repr = str
if str is None:
str = repr
self._strify__str = str
self._strify__repr = repr
def __getattribute__(self, attr):
if attr.startswith('_strify__'):
return super(Reprify, self).__getattribute__(attr)
return getattr(self._strify__original, attr)
def __repr__(self):
if self._strify__repr is not None:
return self._strify__repr
return repr(self._strify__original)
def __str__(self):
if self._strify__str is not None:
return self._strify__str
        return str(self._strify__original)
| true | true |
1c4a10aa8c31765be89abb6d09a8c8e18a6945b0 | 5,744 | py | Python | qiskit/test/decorators.py | EnriqueL8/qiskit-terra | 08b801f1f8598c4e44680b4a75c232ed92db0262 | [
"Apache-2.0"
] | 2 | 2019-06-28T19:58:42.000Z | 2019-07-26T05:04:02.000Z | qiskit/test/decorators.py | EnriqueL8/qiskit-terra | 08b801f1f8598c4e44680b4a75c232ed92db0262 | [
"Apache-2.0"
] | null | null | null | qiskit/test/decorators.py | EnriqueL8/qiskit-terra | 08b801f1f8598c4e44680b4a75c232ed92db0262 | [
"Apache-2.0"
] | 1 | 2020-01-24T21:01:06.000Z | 2020-01-24T21:01:06.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Decorator for using with Qiskit unit tests."""
import functools
import os
import sys
import unittest
from warnings import warn
from qiskit.util import _has_connection
from .testing_options import get_test_options
HAS_NET_CONNECTION = None
def is_aer_provider_available():
"""Check if the C++ simulator can be instantiated.
Returns:
bool: True if simulator executable is available
"""
# TODO: HACK FROM THE DEPTHS OF DESPAIR AS AER DOES NOT WORK ON MAC
if sys.platform == 'darwin':
return False
try:
import qiskit.providers.aer # pylint: disable=unused-import
except ImportError:
return False
return True
def requires_aer_provider(test_item):
"""Decorator that skips test if qiskit aer provider is not available
Args:
test_item (callable): function or class to be decorated.
Returns:
callable: the decorated function.
"""
reason = 'Aer provider not found, skipping test'
return unittest.skipIf(not is_aer_provider_available(), reason)(test_item)
def slow_test(func):
"""Decorator that signals that the test takes minutes to run.
Args:
func (callable): test function to be decorated.
Returns:
callable: the decorated function.
"""
@functools.wraps(func)
def _wrapper(*args, **kwargs):
skip_slow = not TEST_OPTIONS['run_slow']
if skip_slow:
raise unittest.SkipTest('Skipping slow tests')
return func(*args, **kwargs)
return _wrapper
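# Hypothetical usage inside a test class (the test name is illustrative):
#
#     @slow_test
#     def test_full_state_tomography(self):
#         ...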
def _get_credentials():
"""Finds the credentials for a specific test and options.
Returns:
Credentials: set of credentials
Raises:
SkipTest: when credentials can't be found
"""
try:
from qiskit.providers.ibmq.credentials import (Credentials,
discover_credentials)
except ImportError:
raise unittest.SkipTest('qiskit-ibmq-provider could not be found, '
'and is required for executing online tests.')
if os.getenv('IBMQ_TOKEN') and os.getenv('IBMQ_URL'):
return Credentials(os.getenv('IBMQ_TOKEN'), os.getenv('IBMQ_URL'))
elif os.getenv('QISKIT_TESTS_USE_CREDENTIALS_FILE'):
# Attempt to read the standard credentials.
discovered_credentials = discover_credentials()
if discovered_credentials:
# Decide which credentials to use for testing.
if len(discovered_credentials) > 1:
raise unittest.SkipTest(
"More than 1 credential set found, use: "
"IBMQ_TOKEN and IBMQ_URL env variables to "
"set credentials explicitly")
# Use the first available credentials.
return list(discovered_credentials.values())[0]
raise unittest.SkipTest(
'No IBMQ credentials found for running the test. This is required for '
'running online tests.')
def requires_qe_access(func):
"""Deprecated in favor of `online_test`"""
warn("`requires_qe_access` is going to be replaced in favor of `online_test`",
DeprecationWarning)
@functools.wraps(func)
def _wrapper(self, *args, **kwargs):
if TEST_OPTIONS['skip_online']:
raise unittest.SkipTest('Skipping online tests')
credentials = _get_credentials()
self.using_ibmq_credentials = credentials.is_ibmq()
kwargs.update({'qe_token': credentials.token,
'qe_url': credentials.url})
return func(self, *args, **kwargs)
return _wrapper
def online_test(func):
"""Decorator that signals that the test uses the network (and the online API):
    It involves:
    * determining whether the test should be skipped by checking
      environment variables.
    * reading the credentials from an alternative set of environment
      variables if the `USE_ALTERNATE_ENV_CREDENTIALS` environment
      variable is set.
    * reading `qe_token` and `qe_url` from `Qconfig.py`, environment
      variables or qiskitrc if the test is not skipped.
    * appending `qe_token` and `qe_url` as arguments to the test
      function if the test is not skipped.
Args:
func (callable): test function to be decorated.
Returns:
callable: the decorated function.
"""
@functools.wraps(func)
def _wrapper(self, *args, **kwargs):
# To avoid checking the connection in each test
global HAS_NET_CONNECTION # pylint: disable=global-statement
if TEST_OPTIONS['skip_online']:
raise unittest.SkipTest('Skipping online tests')
if HAS_NET_CONNECTION is None:
HAS_NET_CONNECTION = _has_connection('qiskit.org', 443)
if not HAS_NET_CONNECTION:
raise unittest.SkipTest("Test requires internet connection.")
credentials = _get_credentials()
self.using_ibmq_credentials = credentials.is_ibmq()
kwargs.update({'qe_token': credentials.token,
'qe_url': credentials.url})
return func(self, *args, **kwargs)
return _wrapper
TEST_OPTIONS = get_test_options()
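# --- Added example (not part of the original module): a minimal sketch of how
# these decorators combine with a standard unittest.TestCase. The class and
# test names below are hypothetical.
class _ExampleDecoratedTests(unittest.TestCase):
    @slow_test
    def test_something_slow(self):
        """Runs only when the run_slow test option is enabled."""
        self.assertTrue(True)

    @online_test
    def test_something_online(self, qe_token, qe_url):
        """Skipped without network access or IBMQ credentials; the decorator
        injects qe_token/qe_url as keyword arguments."""
        self.assertIsNotNone(qe_token)
        self.assertIsNotNone(qe_url)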
# ============================================================================
# alipay/aop/api/request/KoubeiServindustryPortfolioDataCreateRequest.py
# from articuly/alipay-sdk-python-all (Apache-2.0)
# ============================================================================
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiServindustryPortfolioDataCreateModel import KoubeiServindustryPortfolioDataCreateModel
class KoubeiServindustryPortfolioDataCreateRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, KoubeiServindustryPortfolioDataCreateModel):
self._biz_content = value
else:
self._biz_content = KoubeiServindustryPortfolioDataCreateModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'koubei.servindustry.portfolio.data.create'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
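# --- Added usage sketch (not part of the original SDK file). The field values
# below are placeholders; `biz_content` accepts a dict and is coerced through
# KoubeiServindustryPortfolioDataCreateModel.from_alipay_dict().
def _example_build_koubei_request():
    request = KoubeiServindustryPortfolioDataCreateRequest()
    request.biz_content = {"portfolio_name": "example"}  # hypothetical payload
    request.notify_url = "https://example.com/notify"
    request.add_other_text_param("trace_id", "demo-123")
    # get_params() assembles the flat form fields that the client signs and POSTs
    return request.get_params()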
# ============================================================================
# scripts/table4_run.py from nataliepopescu/osdi21-artifact (MIT)
# ============================================================================
import os
import subprocess
import re
import time
from numpy import average
from ExpStats import runExpWithName
ROOT_PATH = os.path.dirname(os.path.realpath(__file__))
def parseThroughput(out):
try:
m = re.search(r'Requests/sec: ([0-9,.]+)', out)
# m = re.search(r'([0-9,]+) ns/iter', out)
s = m.group(1)
result = float(s.strip())
#s = s.replace(',', '')
#result = int(s)
except Exception:
print(out)
print("Run experiment failed")
return None
return result
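# --- Added example (not in the original script): parseThroughput() expects the
# summary block printed by an HTTP load generator such as wrk, which reports a
# "Requests/sec:" line.
def _example_parse_throughput():
    sample = "Running 10s test\n  Requests/sec: 1234.56\n  Transfer/sec: 1.2MB"
    assert parseThroughput(sample) == 1234.56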
def parseZola(out):
try:
m = re.search(r'Done in ([0-9]+)ms.', out)
# m = re.search(r'([0-9,]+) ns/iter', out)
s = m.group(1)
result = float(s.strip())
#s = s.replace(',', '')
#result = int(s)
except Exception:
print(out)
print("Run experiment failed")
return None
return result
def test_swc():
print("Testing swc")
os.chdir(ROOT_PATH + "/../benchmarks/swc")
safe_time, _, _ = runExpWithName("./test_bc-safe", None, 20, False)
unsafe_time, _, _ = runExpWithName("./test_bc-unsafe", None, 20, False)
perf_diff = (safe_time - unsafe_time) / unsafe_time
print("Performance difference of swc is: {:2.2%}".format(perf_diff))
def test_warp():
print("Testing warp")
os.chdir(ROOT_PATH + "/../benchmarks/warp")
out = subprocess.Popen([ROOT_PATH + '/runWarp.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = out.communicate()
out = out.decode("utf-8") # convert to string from bytes
safe_throughput = parseThroughput(out)
out = subprocess.Popen([ROOT_PATH + '/runWarp.sh', 'unsafe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = out.communicate()
out = out.decode("utf-8") # convert to string from bytes
unsafe_throughput = parseThroughput(out)
if safe_throughput and unsafe_throughput:
perf_diff = (unsafe_throughput - safe_throughput) / unsafe_throughput
print("Performance difference of warp is: {:2.2%}".format(perf_diff))
def test_iron():
print("Testing iron")
os.chdir(ROOT_PATH + "/../benchmarks/iron")
out = subprocess.Popen([ROOT_PATH + '/runIron.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = out.communicate()
out = out.decode("utf-8") # convert to string from bytes
safe_throughput = parseThroughput(out)
out = subprocess.Popen([ROOT_PATH + '/runIron.sh', 'unsafe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = out.communicate()
out = out.decode("utf-8") # convert to string from bytes
unsafe_throughput = parseThroughput(out)
if safe_throughput and unsafe_throughput:
perf_diff = (unsafe_throughput - safe_throughput) / unsafe_throughput
print("Performance difference of iron is: {:2.2%}".format(perf_diff))
def test_zola():
print("Testing zola")
os.chdir(ROOT_PATH + "/../benchmarks/zola")
time_list = []
for _ in range(100):
        # was 'safe' in the original, which made unsafe_time measure the safe
        # build; 'unsafe' matches the variable it feeds and the other tests
        out = subprocess.Popen([ROOT_PATH + '/runZola.sh', 'unsafe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = out.communicate()
out = out.decode("utf-8") # convert to string from bytes
time = parseZola(out)
time_list.append(time)
unsafe_time = average(time_list)
time_list = []
for _ in range(100):
out = subprocess.Popen([ROOT_PATH + '/runZola.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = out.communicate()
out = out.decode("utf-8") # convert to string from bytes
time = parseZola(out)
time_list.append(time)
safe_time = average(time_list)
perf_diff = (safe_time - unsafe_time) / unsafe_time
print("Performance difference of zola is: {:2.2%}".format(perf_diff))
def test_rustpython():
print("Testing RustPython")
os.chdir(ROOT_PATH + "/../benchmarks/RustPython")
arg = ROOT_PATH + "/../benchmarks/RustPython/benches/benchmarks/pystone.py"
safe_time, _, _ = runExpWithName("./test_bc-safe", arg, 10, False)
unsafe_time, _, _ = runExpWithName("./test_bc-unsafe", arg, 10, False)
perf_diff = (safe_time - unsafe_time) / unsafe_time
print("Performance difference of RustPython is: {:2.2%}".format(perf_diff))
os.chdir(ROOT_PATH + "/../benchmarks")
test_iron()
test_swc()
test_warp()
test_zola()
test_rustpython()
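# --- Added note (not part of the original script): every time-based test above
# reduces to the same relative-overhead formula; a hypothetical helper capturing it:
def _relative_slowdown(safe_time, unsafe_time):
    """Fraction by which the safe build is slower than the unsafe build,
    matching the perf_diff computed in test_swc/test_zola/test_rustpython."""
    return (safe_time - unsafe_time) / unsafe_time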
# ============================================================================
# apps/goods/migrations/0013_auto_20181010_1629.py
# from lianxiaopang/camel-store-api (Apache-2.0)
# ============================================================================
# Generated by Django 2.1.2 on 2018-10-10 08:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('goods', '0012_auto_20181010_1558'),
]
operations = [
migrations.AlterModelOptions(
name='goods',
options={'ordering': ('status',), 'verbose_name': '商品', 'verbose_name_plural': '商品'},
),
]
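# --- Added note (not part of the original migration): AlterModelOptions only
# rewrites model metadata (ordering, verbose_name) in Django's migration state;
# it emits no SQL, so applying it does not change the database schema.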
# ============================================================================
# leetcode/122/122.best-time-to-buy-and-sell-stock-ii.py
# from Yu-Ren-NEU/Leetcode (MIT)
# ============================================================================
#
"""
Functions to check a design manufacturability
- check_ss_manufacturability
checks the manufacturability of a stacking sequence list
"""
__version__ = '1.0'
__author__ = 'Noemie Fedon'
import sys
import numpy as np
sys.path.append(r'C:\RELAY')
from src.contiguity import is_contig
from src.disorientation import is_diso_ss
from src.balance import is_balanced
from src.dam_tol import is_dam_tol
from src.ten_percent_rule import is_ten_percent_rule
from src.lp_functions_2 import calc_lampamA
from src.constraints import Constraints
from src.pretty_print import print_ss
def check_ss_manufacturability(
ss, constraints, no_ipo_check=False, no_bal_check=False,
equality_45_135=False, equality_0_90=False, n_plies=None):
"""
checks the manufacturability of a stacking sequence list
"""
if n_plies is not None and ss.size != n_plies:
raise Exception("Wrong number of plies")
if constraints.dam_tol:
if not is_dam_tol(ss, constraints):
print_ss(ss)
raise Exception("Damage tolerance constraint not satisfied")
if not no_bal_check and constraints.bal:
if not is_balanced(ss, constraints):
raise Exception("Balance constraint not satisfied")
if not no_ipo_check and constraints.ipo:
lampamA = calc_lampamA(ss, constraints)
if (abs(lampamA[2:4]) > 1e-10).any():
print_ss(ss)
print('lampamA', lampamA)
# print('ipo')
raise Exception("In plane orthotropy constraint not satisfied")
if constraints.diso:
if hasattr(constraints, 'dam_tol_rule'):
if not is_diso_ss(ss, constraints.delta_angle,
constraints.dam_tol, constraints.dam_tol_rule):
raise Exception("Disorientation constraint not satisfied")
else:
if not is_diso_ss(ss, constraints.delta_angle,
constraints.dam_tol, constraints.n_plies_dam_tol):
raise Exception("Disorientation constraint not satisfied")
if constraints.contig:
if not is_contig(ss, constraints.n_contig):
raise Exception("Contiguity constraint not satisfied")
if constraints.rule_10_percent:
if not is_ten_percent_rule(
constraints, stack=ss,
equality_45_135=equality_45_135,
equality_0_90=equality_0_90):
raise Exception("10% rule not satisfied")
return 0
if __name__ == "__main__":
print('\n*** Test for the function check_ss_manufacturability ***')
constraints = Constraints(
sym=True,
bal=True,
ipo=True,
oopo=False,
dam_tol=False,
rule_10_percent=True,
percent_0=10,
percent_45=0,
percent_90=10,
percent_135=0,
percent_45_135=10,
diso=True,
contig=True,
n_contig=5,
delta_angle=45,
set_of_angles=np.array([0, 45, -45, 90]))
ss = np.array([ 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 0, 0, 0, 45, 0, 0, 0, 0, 45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 45, 0, 0, 0, 0, 0, 45, 0, 0, 0, 0, -45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, 45, 0, 0, 45, 90, 90, 90, 90, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 90, 90, 90, -45, 0, 0, 45, 45, 0, 0, -45, 90, 90, 90, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 90, 90, 90, 90, 45, 0, 0, 45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, -45, 0, 0, 0, 0, 45, 0, 0, 0, 0, 0, 45, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, 45, 0, 0, 0, 0, 45, 0, 0, 0, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0], int)
check_ss_manufacturability(ss, constraints)
| 42.257732 | 1,024 | 0.56648 | __version__ = '1.0'
__author__ = 'Noemie Fedon'
import sys
import numpy as np
sys.path.append(r'C:\RELAY')
from src.contiguity import is_contig
from src.disorientation import is_diso_ss
from src.balance import is_balanced
from src.dam_tol import is_dam_tol
from src.ten_percent_rule import is_ten_percent_rule
from src.lp_functions_2 import calc_lampamA
from src.constraints import Constraints
from src.pretty_print import print_ss
def check_ss_manufacturability(
ss, constraints, no_ipo_check=False, no_bal_check=False,
equality_45_135=False, equality_0_90=False, n_plies=None):
if n_plies is not None and ss.size != n_plies:
raise Exception("Wrong number of plies")
if constraints.dam_tol:
if not is_dam_tol(ss, constraints):
print_ss(ss)
raise Exception("Damage tolerance constraint not satisfied")
if not no_bal_check and constraints.bal:
if not is_balanced(ss, constraints):
raise Exception("Balance constraint not satisfied")
if not no_ipo_check and constraints.ipo:
lampamA = calc_lampamA(ss, constraints)
if (abs(lampamA[2:4]) > 1e-10).any():
print_ss(ss)
print('lampamA', lampamA)
raise Exception("In plane orthotropy constraint not satisfied")
if constraints.diso:
if hasattr(constraints, 'dam_tol_rule'):
if not is_diso_ss(ss, constraints.delta_angle,
constraints.dam_tol, constraints.dam_tol_rule):
raise Exception("Disorientation constraint not satisfied")
else:
if not is_diso_ss(ss, constraints.delta_angle,
constraints.dam_tol, constraints.n_plies_dam_tol):
raise Exception("Disorientation constraint not satisfied")
if constraints.contig:
if not is_contig(ss, constraints.n_contig):
raise Exception("Contiguity constraint not satisfied")
if constraints.rule_10_percent:
if not is_ten_percent_rule(
constraints, stack=ss,
equality_45_135=equality_45_135,
equality_0_90=equality_0_90):
raise Exception("10% rule not satisfied")
return 0
if __name__ == "__main__":
print('\n*** Test for the function check_ss_manufacturability ***')
constraints = Constraints(
sym=True,
bal=True,
ipo=True,
oopo=False,
dam_tol=False,
rule_10_percent=True,
percent_0=10,
percent_45=0,
percent_90=10,
percent_135=0,
percent_45_135=10,
diso=True,
contig=True,
n_contig=5,
delta_angle=45,
set_of_angles=np.array([0, 45, -45, 90]))
ss = np.array([ 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 0, 0, 0, 45, 0, 0, 0, 0, 45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 45, 0, 0, 0, 0, 0, 45, 0, 0, 0, 0, -45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, 45, 0, 0, 45, 90, 90, 90, 90, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 90, 90, 90, -45, 0, 0, 45, 45, 0, 0, -45, 90, 90, 90, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 90, 90, 90, 90, 45, 0, 0, 45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, -45, 0, 0, 0, 0, 45, 0, 0, 0, 0, 0, 45, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, 45, 0, 0, 0, 0, 45, 0, 0, 0, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0], int)
check_ss_manufacturability(ss, constraints)
| true | true |
1c4a1315d41ea7313fba33ea72615b897facc135 | 2,937 | py | Python | bayesian/__init__.py | prashbnair/fabric8-analytics-server | af1f71018b82ebafae7701a443412eed34de5a15 | [
"Apache-2.0"
] | null | null | null | bayesian/__init__.py | prashbnair/fabric8-analytics-server | af1f71018b82ebafae7701a443412eed34de5a15 | [
"Apache-2.0"
] | null | null | null | bayesian/__init__.py | prashbnair/fabric8-analytics-server | af1f71018b82ebafae7701a443412eed34de5a15 | [
"Apache-2.0"
] | null | null | null | """Module with the declaration of web application and its basic endpoints."""
import logging
import os
from f8a_worker.setup_celery import init_selinon
from flask import Flask
from flask import g
from flask import redirect
from flask import url_for
from flask_appconfig import AppConfig
from flask_cache import Cache
from flask_sqlalchemy import SQLAlchemy
from raven.contrib.flask import Sentry
def setup_logging(app):
"""Set up logger, the log level is read from the environment variable."""
if not app.debug:
handler = logging.StreamHandler()
log_level = os.environ.get('FLASK_LOGGING_LEVEL', logging.getLevelName(logging.WARNING))
handler.setLevel(log_level)
app.logger.addHandler(handler)
# we must initialize DB here to not create import loop with .auth...
# flask really sucks at this
rdb = SQLAlchemy()
cache = Cache(config={'CACHE_TYPE': 'simple'})
def create_app(configfile=None):
"""Create the web application and define basic endpoints."""
# do the imports here to not shadow e.g. "import bayesian.frontend.api_v1"
# by Blueprint imported here
from bayesian.api_v1 import api_v1
from bayesian.api.api_v2 import api_v2
from bayesian.api.user_api import user_api
from bayesian.utils import JSONEncoderWithExtraTypes
app = Flask(__name__)
AppConfig(app, configfile)
cache.init_app(app)
# actually init the DB with config values now
rdb.init_app(app)
app.rdb = rdb
# We need JSON encoder that can serialize datetime.datetime
app.json_encoder = JSONEncoderWithExtraTypes
app.register_blueprint(api_v1)
app.register_blueprint(api_v2)
app.register_blueprint(user_api)
# Redirect to latest API version if /api is accessed
app.route('/api')(lambda: redirect(url_for('api_v1.apiendpoints__slashless')))
# Likewise for base URL, and make that accessible by name
# Configure CORS.
from flask_cors import CORS
CORS(app, resources={r"/api/*": {"origins": "*"}})
@app.route('/')
def base_url():
return redirect(url_for('api_v1.apiendpoints__slashless'))
setup_logging(app)
@app.before_request
def set_current_user():
g.current_user = None
@app.after_request
def access_control_allow_origin(response):
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers["Access-Control-Allow-Headers"] = "authorization, content-type, " \
"x-3scale-account-secret"
response.headers["Access-Control-Allow-Methods"] = "DELETE, GET, HEAD, OPTIONS, " \
"PATCH, POST, PUT"
response.headers["Allow"] = "GET, HEAD, OPTIONS, PATCH, POST, PUT"
return response
return app
init_selinon()
app = create_app()
SENTRY_DSN = os.environ.get("SENTRY_DSN", "")
sentry = Sentry(app, dsn=SENTRY_DSN, logging=True, level=logging.ERROR)
app.logger.info('App initialized, ready to roll...')
| 31.244681 | 96 | 0.712632 |
import logging
import os
from f8a_worker.setup_celery import init_selinon
from flask import Flask
from flask import g
from flask import redirect
from flask import url_for
from flask_appconfig import AppConfig
from flask_cache import Cache
from flask_sqlalchemy import SQLAlchemy
from raven.contrib.flask import Sentry
def setup_logging(app):
if not app.debug:
handler = logging.StreamHandler()
log_level = os.environ.get('FLASK_LOGGING_LEVEL', logging.getLevelName(logging.WARNING))
handler.setLevel(log_level)
app.logger.addHandler(handler)
rdb = SQLAlchemy()
cache = Cache(config={'CACHE_TYPE': 'simple'})
def create_app(configfile=None):
from bayesian.api_v1 import api_v1
from bayesian.api.api_v2 import api_v2
from bayesian.api.user_api import user_api
from bayesian.utils import JSONEncoderWithExtraTypes
app = Flask(__name__)
AppConfig(app, configfile)
cache.init_app(app)
rdb.init_app(app)
app.rdb = rdb
app.json_encoder = JSONEncoderWithExtraTypes
app.register_blueprint(api_v1)
app.register_blueprint(api_v2)
app.register_blueprint(user_api)
app.route('/api')(lambda: redirect(url_for('api_v1.apiendpoints__slashless')))
from flask_cors import CORS
CORS(app, resources={r"/api/*": {"origins": "*"}})
@app.route('/')
def base_url():
return redirect(url_for('api_v1.apiendpoints__slashless'))
setup_logging(app)
@app.before_request
def set_current_user():
g.current_user = None
@app.after_request
def access_control_allow_origin(response):
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers["Access-Control-Allow-Headers"] = "authorization, content-type, " \
"x-3scale-account-secret"
response.headers["Access-Control-Allow-Methods"] = "DELETE, GET, HEAD, OPTIONS, " \
"PATCH, POST, PUT"
response.headers["Allow"] = "GET, HEAD, OPTIONS, PATCH, POST, PUT"
return response
return app
init_selinon()
app = create_app()
SENTRY_DSN = os.environ.get("SENTRY_DSN", "")
sentry = Sentry(app, dsn=SENTRY_DSN, logging=True, level=logging.ERROR)
app.logger.info('App initialized, ready to roll...')
| true | true |
1c4a13f112efc10a3c692c96f72757500976a370 | 688 | py | Python | leetcode/122/122.best-time-to-buy-and-sell-stock-ii.py | Yu-Ren-NEU/Leetcode | e82bc2734680606f676fe867dbcb9b9e71635bf5 | [
"MIT"
] | 1 | 2020-01-06T06:54:22.000Z | 2020-01-06T06:54:22.000Z | leetcode/122/122.best-time-to-buy-and-sell-stock-ii.py | Yu-Ren-NEU/Leetcode | e82bc2734680606f676fe867dbcb9b9e71635bf5 | [
"MIT"
] | 1 | 2020-01-07T02:22:06.000Z | 2020-01-07T02:22:06.000Z | leetcode/122/122.best-time-to-buy-and-sell-stock-ii.py | Yu-Ren-NEU/Leetcode | e82bc2734680606f676fe867dbcb9b9e71635bf5 | [
"MIT"
] | 2 | 2020-01-06T20:04:04.000Z | 2020-01-10T08:24:01.000Z | #
# @lc app=leetcode id=122 lang=python3
#
# [122] Best Time to Buy and Sell Stock II
#
# @lc code=start
class Solution:
def maxProfit(self, prices: []) -> int:
# 我们将数值画到x-y坐标系里
# 其实要求的就是单调上升的曲线, 找到相应的波峰
if not prices or len(prices) == 1:
return 0
result = 0
for i in range(1, len(prices)):
if prices[i] > prices[i-1]:
result += prices[i] - prices[i-1]
return result
def test(self):
assert(self.maxProfit([7,1,5,3,6,4]) == 7)
assert(self.maxProfit([1,2,3,4,5]) == 4)
assert(self.maxProfit([7,6,4,3,1]) == 0)
sol = Solution()
sol.test()
# @lc code=end
| 20.848485 | 50 | 0.52907 |
class Solution:
def maxProfit(self, prices: []) -> int:
if not prices or len(prices) == 1:
return 0
result = 0
for i in range(1, len(prices)):
if prices[i] > prices[i-1]:
result += prices[i] - prices[i-1]
return result
def test(self):
assert(self.maxProfit([7,1,5,3,6,4]) == 7)
assert(self.maxProfit([1,2,3,4,5]) == 4)
assert(self.maxProfit([7,6,4,3,1]) == 0)
sol = Solution()
sol.test()
| true | true |
# ============================================================================
# sw/scripts/jlink.py from alvarop/chaac (MIT)
# ============================================================================
#!/usr/bin/env python
import argparse
import os
import sys
import time
import tempfile
import subprocess
JLINK_PATH = "/usr/bin/JLinkExe"
parser = argparse.ArgumentParser()
parser.add_argument("--dump", action="store_true")
parser.add_argument("--erase", action="store_true")
parser.add_argument("--device", default="stm32l432kc", help="device name")
parser.add_argument("--addr", default=0, help="start addr")
parser.add_argument("--len", default=0x40000, help="read/write len")
parser.add_argument("--filename", help="filename")
args = parser.parse_args()
def write_line(file, line):
if line[-1] != "\n":
line += "\n"
file.write(line.encode("utf-8"))
with tempfile.NamedTemporaryFile(prefix="jlink", delete=False) as scriptfile:
write_line(scriptfile, "if swd")
write_line(scriptfile, "device {}".format(args.device))
write_line(scriptfile, "speed 4000")
write_line(scriptfile, "connect")
write_line(scriptfile, "halt")
if args.erase == True:
write_line(scriptfile, "erase")
elif args.dump == True:
write_line(
scriptfile,
"savebin {} 0x{:X} 0x{:X}".format(
args.filename, int(args.addr, 0), int(args.len, 0)
),
)
write_line(scriptfile, "go")
write_line(scriptfile, "exit")
command_file = scriptfile.name
if command_file:
subprocess.call([JLINK_PATH, command_file])
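# --- Added usage examples (not part of the original script):
#   ./jlink.py --erase --device stm32l432kc
#   ./jlink.py --dump --addr 0x08000000 --len 0x10000 --filename dump.bin
# The script writes a temporary J-Link command file and hands it to JLinkExe.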
# ============================================================================
# futu/quote/open_quote_context.py from postpascal/py-futu-api (Apache-2.0)
# ============================================================================
# -*- coding: utf-8 -*-
"""
Market quote and trade context setting
"""
import datetime
import math
from time import sleep
import pandas as pd
from futu.common.open_context_base import OpenContextBase, ContextStatus
from futu.quote.quote_query import *
class OpenQuoteContext(OpenContextBase):
"""行情上下文对象类"""
def __init__(self, host='127.0.0.1', port=11111):
"""
初始化Context对象
:param host: host地址
:param port: 端口
"""
self._ctx_subscribe = {}
super(OpenQuoteContext, self).__init__(host, port, True)
def close(self):
"""
        Close the context object.
.. code:: python
from futu import *
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
quote_ctx.close()
"""
super(OpenQuoteContext, self).close()
def on_api_socket_reconnected(self):
"""for API socket reconnected"""
# auto subscriber
resub_count = 0
subtype_list = []
code_list = []
resub_dict = copy(self._ctx_subscribe)
subtype_all_cnt = len(resub_dict.keys())
subtype_cur_cnt = 0
ret_code = RET_OK
ret_msg = ''
for subtype in resub_dict.keys():
subtype_cur_cnt += 1
code_set = resub_dict[subtype]
code_list_new = [code for code in code_set]
if len(code_list_new) == 0:
continue
if len(code_list) == 0:
code_list = code_list_new
subtype_list = [subtype]
is_need_sub = False
if code_list == code_list_new:
if subtype not in subtype_list:
                    subtype_list.append(subtype)   # merge subtypes that share the same code list
else:
ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)
logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(
len(code_list), ret_code, ret_msg, subtype_list, code_list))
if ret_code != RET_OK:
break
resub_count += len(code_list)
code_list = code_list_new
subtype_list = [subtype]
            # the loop is about to end: flush the final batch of subscriptions
if subtype_cur_cnt == subtype_all_cnt and len(code_list):
ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)
logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(len(code_list), ret_code, ret_msg, subtype_list, code_list))
if ret_code != RET_OK:
break
resub_count += len(code_list)
code_list = []
subtype_list = []
logger.debug("reconnect subscribe all code_count={} ret_code={} ret_msg={}".format(resub_count, ret_code, ret_msg))
        # resubscription failed: close the connection and retry
if ret_code != RET_OK:
logger.error("reconnect subscribe error, close connect and retry!!")
self._status = ContextStatus.Start
self._wait_reconnect()
return ret_code, ret_msg
def get_trading_days(self, market, start=None, end=None):
"""获取交易日
:param market: 市场类型,Market_
:param start: 起始日期。例如'2018-01-01'。
:param end: 结束日期。例如'2018-01-01'。
start和end的组合如下:
========== ========== ========================================
start类型 end类型 说明
========== ========== ========================================
str str start和end分别为指定的日期
None str start为end往前365天
str None end为start往后365天
None None end为当前日期,start为end往前365天
========== ========== ========================================
:return: 成功时返回(RET_OK, data),data是[{'trade_date_type': 0, 'time': '2018-01-05'}]数组;失败时返回(RET_ERROR, data),其中data是错误描述字符串
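        :example: (added illustrative snippet, following the style of the other methods)
        .. code:: python
            from futu import *
            quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
            print(quote_ctx.get_trading_days(Market.HK, start='2018-01-01', end='2018-01-31'))
            quote_ctx.close()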
"""
if market is None or is_str(market) is False:
error_str = ERROR_STR_PREFIX + "the type of market param is wrong"
return RET_ERROR, error_str
ret, msg, start, end = normalize_start_end_date(start, end, 365)
if ret != RET_OK:
return ret, msg
query_processor = self._get_sync_query_processor(
TradeDayQuery.pack_req, TradeDayQuery.unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {
'market': market,
'start_date': start,
'end_date': end,
'conn_id': self.get_sync_conn_id()
}
ret_code, msg, trade_day_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
return RET_OK, trade_day_list
def get_stock_basicinfo(self, market, stock_type=SecurityType.STOCK, code_list=None):
"""
获取指定市场中特定类型的股票基本信息
:param market: 市场类型,futu.common.constant.Market
:param stock_type: 股票类型, futu.common.constant.SecurityType
:param code_list: 如果不为None,应该是股票code的iterable类型,将只返回指定的股票信息
:return: (ret_code, content)
ret_code 等于RET_OK时, content为Pandas.DataFrame数据, 否则为错误原因字符串, 数据列格式如下
================= =========== ==============================================================================
参数 类型 说明
================= =========== ==============================================================================
code str 股票代码
name str 名字
lot_size int 每手数量
stock_type str 股票类型,参见SecurityType
stock_child_type str 涡轮子类型,参见WrtType
stock_owner str 所属正股的代码
option_type str 期权类型,Qot_Common.OptionType
strike_time str 行权日
strike_price float 行权价
suspension bool 是否停牌(True表示停牌)
listing_date str 上市时间
stock_id int 股票id
delisting bool 是否退市
================= =========== ==============================================================================
:example:
.. code-block:: python
from futu import *
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
print(quote_ctx.get_stock_basicinfo(Market.HK, SecurityType.WARRANT))
print(quote_ctx.get_stock_basicinfo(Market.US, SecurityType.DRVT, 'US.AAPL210115C185000'))
quote_ctx.close()
"""
param_table = {'market': market, 'stock_type': stock_type}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str
if code_list is not None:
if is_str(code_list):
code_list = code_list.split(',')
elif isinstance(code_list, list):
pass
else:
return RET_ERROR, "code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'"
query_processor = self._get_sync_query_processor(
StockBasicInfoQuery.pack_req, StockBasicInfoQuery.unpack_rsp)
kargs = {
"market": market,
'stock_type': stock_type,
'code_list': code_list,
'conn_id': self.get_sync_conn_id()
}
ret_code, msg, basic_info_list = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, msg
col_list = [
'code', 'name', 'lot_size', 'stock_type', 'stock_child_type', 'stock_owner',
'option_type', 'strike_time', 'strike_price', 'suspension',
'listing_date', 'stock_id', 'delisting'
]
basic_info_table = pd.DataFrame(basic_info_list, columns=col_list)
return RET_OK, basic_info_table
def get_multiple_history_kline(self,
codelist,
start=None,
end=None,
ktype=KLType.K_DAY,
autype=AuType.QFQ):
"""
获取多只股票的本地历史k线数据
:param codelist: 股票代码列表,list或str。例如:['HK.00700', 'HK.00001'],'HK.00700,HK.00001'
:param start: 起始时间,例如'2017-06-20'
:param end: 结束时间, 例如'2017-07-20',start与end组合关系参见 get_history_kline_
:param ktype: k线类型,参见KLType
:param autype: 复权类型,参见AuType
:return: 成功时返回(RET_OK, [data]),data是DataFrame数据, 数据列格式如下
================= =========== ==============================================================================
参数 类型 说明
================= =========== ==============================================================================
code str 股票代码
time_key str k线时间
open float 开盘价
close float 收盘价
high float 最高价
low float 最低价
pe_ratio float 市盈率(该字段为比例字段,默认不展示%)
turnover_rate float 换手率
volume int 成交量
turnover float 成交额
change_rate float 涨跌幅
last_close float 昨收价
================= =========== ==============================================================================
失败时返回(RET_ERROR, data),其中data是错误描述字符串
"""
if is_str(codelist):
codelist = codelist.split(',')
elif isinstance(codelist, list):
pass
else:
return RET_ERROR, "code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'"
result = []
for code in codelist:
ret, data = self.get_history_kline(code, start, end, ktype, autype)
if ret != RET_OK:
return RET_ERROR, 'get history kline error: {}, {},{},{},{}'.format(data, code, start, end, ktype)
result.append(data)
        return RET_OK, result  # RET_OK is 0; returning the constant makes the intent explicit
def _get_history_kline_impl(self,
query_cls,
code,
start=None,
end=None,
ktype=KLType.K_DAY,
autype=AuType.QFQ,
fields=[KL_FIELD.ALL]
):
ret, msg, req_start, end = normalize_start_end_date(start, end, 365)
if ret != RET_OK:
return ret, msg
req_fields = unique_and_normalize_list(fields)
if not fields:
req_fields = copy(KL_FIELD.ALL_REAL)
req_fields = KL_FIELD.normalize_field_list(req_fields)
if not req_fields:
error_str = ERROR_STR_PREFIX + "the type of fields param is wrong"
return RET_ERROR, error_str
if autype is None:
autype = 'None'
param_table = {'code': code, 'ktype': ktype, 'autype': autype}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str
max_kl_num = 1000
data_finish = False
list_ret = []
        # request the data in a loop so a single huge request cannot time out
while not data_finish:
kargs = {
"code": code,
"start_date": req_start,
"end_date": end,
"ktype": ktype,
"autype": autype,
"fields": copy(req_fields),
"max_num": max_kl_num,
"conn_id": self.get_sync_conn_id()
}
query_processor = self._get_sync_query_processor(query_cls.pack_req, query_cls.unpack_rsp)
ret_code, msg, content = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, msg
list_kline, has_next, next_time = content
data_finish = (not has_next) or (not next_time)
req_start = next_time
for dict_item in list_kline:
list_ret.append(dict_item)
        # build the output columns: 'code' first, then the requested fields
col_list = ['code']
for field in req_fields:
str_field = KL_FIELD.DICT_KL_FIELD_STR[field]
if str_field not in col_list:
col_list.append(str_field)
kline_frame_table = pd.DataFrame(list_ret, columns=col_list)
return RET_OK, kline_frame_table
def get_history_kline(self,
code,
start=None,
end=None,
ktype=KLType.K_DAY,
autype=AuType.QFQ,
fields=[KL_FIELD.ALL]):
"""
得到本地历史k线,需先参照帮助文档下载k线
:param code: 股票代码
:param start: 开始时间,例如'2017-06-20'
:param end: 结束时间,例如'2017-06-30'
start和end的组合如下:
========== ========== ========================================
start类型 end类型 说明
========== ========== ========================================
str str start和end分别为指定的日期
None str start为end往前365天
str None end为start往后365天
None None end为当前日期,start为end往前365天
========== ========== ========================================
:param ktype: k线类型, 参见 KLType 定义
:param autype: 复权类型, 参见 AuType 定义
:param fields: 需返回的字段列表,参见 KL_FIELD 定义 KL_FIELD.ALL KL_FIELD.OPEN ....
:return: (ret, data)
ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下
ret != RET_OK 返回错误字符串
================= =========== ==============================================================================
参数 类型 说明
================= =========== ==============================================================================
code str 股票代码
time_key str k线时间
open float 开盘价
close float 收盘价
high float 最高价
low float 最低价
pe_ratio float 市盈率(该字段为比例字段,默认不展示%)
turnover_rate float 换手率
volume int 成交量
turnover float 成交额
change_rate float 涨跌幅
last_close float 昨收价
================= =========== ==============================================================================
:example:
.. code:: python
from futu import *
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
print(quote_ctx.get_history_kline('HK.00700', start='2017-06-20', end='2017-06-22'))
quote_ctx.close()
"""
return self._get_history_kline_impl(GetHistoryKlineQuery, code, start=start, end=end,
ktype=ktype, autype=autype, fields=fields)
def request_history_kline(self,
code,
start=None,
end=None,
ktype=KLType.K_DAY,
autype=AuType.QFQ,
fields=[KL_FIELD.ALL],
max_count=1000,
page_req_key=None):
"""
拉取历史k线,不需要先下载历史数据。
:param code: 股票代码
:param start: 开始时间,例如'2017-06-20'
:param end: 结束时间,例如'2017-07-20'。
start和end的组合如下:
========== ========== ========================================
start类型 end类型 说明
========== ========== ========================================
str str start和end分别为指定的日期
None str start为end往前365天
str None end为start往后365天
None None end为当前日期,start为end往前365天
========== ========== ========================================
:param ktype: k线类型, 参见 KLType 定义
:param autype: 复权类型, 参见 AuType 定义
:param fields: 需返回的字段列表,参见 KL_FIELD 定义 KL_FIELD.ALL KL_FIELD.OPEN ....
:param max_count: 本次请求最大返回的数据点个数,传None表示返回start和end之间所有的数据。
:param page_req_key: 分页请求的key。如果start和end之间的数据点多于max_count,那么后续请求时,要传入上次调用返回的page_req_key。初始请求时应该传None。
:return: (ret, data, page_req_key)
ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下。page_req_key在分页请求时(即max_count>0)
可能返回,并且需要在后续的请求中传入。如果没有更多数据,page_req_key返回None。
ret != RET_OK 返回错误字符串
================= =========== ==============================================================================
参数 类型 说明
================= =========== ==============================================================================
code str 股票代码
time_key str k线时间
open float 开盘价
close float 收盘价
high float 最高价
low float 最低价
pe_ratio float 市盈率(该字段为比例字段,默认不展示%)
turnover_rate float 换手率
volume int 成交量
turnover float 成交额
change_rate float 涨跌幅
last_close float 昨收价
================= =========== ==============================================================================
:note
:example:
.. code:: python
from futu import *
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
ret, data, page_req_key = quote_ctx.request_history_kline('HK.00700', start='2017-06-20', end='2018-06-22', max_count=50)
print(ret, data)
ret, data, page_req_key = quote_ctx.request_history_kline('HK.00700', start='2017-06-20', end='2018-06-22', max_count=50, page_req_key=page_req_key)
print(ret, data)
quote_ctx.close()
"""
next_page_req_key = None
ret, msg, req_start, end = normalize_start_end_date(start, end, 365)
if ret != RET_OK:
return ret, msg, next_page_req_key
req_fields = unique_and_normalize_list(fields)
if not fields:
req_fields = copy(KL_FIELD.ALL_REAL)
req_fields = KL_FIELD.normalize_field_list(req_fields)
if not req_fields:
error_str = ERROR_STR_PREFIX + "the type of fields param is wrong"
return RET_ERROR, error_str, next_page_req_key
if autype is None:
autype = 'None'
param_table = {'code': code, 'ktype': ktype, 'autype': autype}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str, next_page_req_key
max_kl_num = min(1000, max_count) if max_count is not None else 1000
data_finish = False
list_ret = []
        # request the data in a loop so a single huge request cannot time out
while not data_finish:
kargs = {
"code": code,
"start_date": req_start,
"end_date": end,
"ktype": ktype,
"autype": autype,
"fields": copy(req_fields),
"max_num": max_kl_num,
"conn_id": self.get_sync_conn_id(),
"next_req_key": page_req_key
}
query_processor = self._get_sync_query_processor(RequestHistoryKlineQuery.pack_req,
RequestHistoryKlineQuery.unpack_rsp)
ret_code, msg, content = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, msg, next_page_req_key
list_kline, has_next, page_req_key = content
list_ret.extend(list_kline)
next_page_req_key = page_req_key
if max_count is not None:
if max_count > len(list_ret) and has_next:
data_finish = False
max_kl_num = min(max_count - len(list_ret), 1000)
else:
data_finish = True
else:
data_finish = not has_next
        # build the output columns: 'code' first, then the requested fields
col_list = ['code']
for field in req_fields:
str_field = KL_FIELD.DICT_KL_FIELD_STR[field]
if str_field not in col_list:
col_list.append(str_field)
kline_frame_table = pd.DataFrame(list_ret, columns=col_list)
return RET_OK, kline_frame_table, next_page_req_key
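    # --- Added note (not part of the original file): typical pagination loop,
    # assuming an open OpenQuoteContext `quote_ctx`:
    #     page_key = None
    #     while True:
    #         ret, df, page_key = quote_ctx.request_history_kline(
    #             'HK.00700', start='2017-06-20', end='2018-06-22',
    #             max_count=1000, page_req_key=page_key)
    #         ...  # consume df (and stop on ret != RET_OK)
    #         if page_key is None:
    #             break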
def get_autype_list(self, code_list):
"""
获取给定股票列表的复权因子
:param code_list: 股票列表,例如['HK.00700']
:return: (ret, data)
ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下
ret != RET_OK 返回错误字符串
===================== =========== =================================================================================
参数 类型 说明
===================== =========== =================================================================================
code str 股票代码
ex_div_date str 除权除息日
split_ratio float 拆合股比例(该字段为比例字段,默认不展示%),例如,对于5股合1股为1/5,对于1股拆5股为5/1
per_cash_div float 每股派现
per_share_div_ratio float 每股送股比例(该字段为比例字段,默认不展示%)
per_share_trans_ratio float 每股转增股比例(该字段为比例字段,默认不展示%)
allotment_ratio float 每股配股比例(该字段为比例字段,默认不展示%)
allotment_price float 配股价
stk_spo_ratio float 增发比例(该字段为比例字段,默认不展示%)
stk_spo_price float 增发价格
forward_adj_factorA float 前复权因子A
forward_adj_factorB float 前复权因子B
backward_adj_factorA float 后复权因子A
backward_adj_factorB float 后复权因子B
===================== =========== =================================================================================
"""
code_list = unique_and_normalize_list(code_list)
for code in code_list:
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
ExrightQuery.pack_req, ExrightQuery.unpack_rsp)
kargs = {
"stock_list": code_list,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, exr_record = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'ex_div_date', 'split_ratio', 'per_cash_div',
'per_share_div_ratio', 'per_share_trans_ratio', 'allotment_ratio',
'allotment_price', 'stk_spo_ratio', 'stk_spo_price',
'forward_adj_factorA', 'forward_adj_factorB',
'backward_adj_factorA', 'backward_adj_factorB'
]
exr_frame_table = pd.DataFrame(exr_record, columns=col_list)
return RET_OK, exr_frame_table
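    # --- Added note (not part of the original file): each returned row is one
    # ex-dividend/ex-rights record plus the forward/backward adjustment factors,
    # e.g. quote_ctx.get_autype_list(['HK.00700']) -> (RET_OK, DataFrame).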
def get_market_snapshot(self, code_list):
"""
获取市场快照
:param code_list: 股票列表
:return: (ret, data)
ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下
ret != RET_OK 返回错误字符串
======================= ============= ==============================================================================
参数 类型 说明
======================= ============= ==============================================================================
code str 股票代码
update_time str 更新时间(yyyy-MM-dd HH:mm:ss),(美股默认是美东时间,港股A股默认是北京时间)
last_price float 最新价格
open_price float 今日开盘价
high_price float 最高价格
low_price float 最低价格
prev_close_price float 昨收盘价格
volume int 成交数量
turnover float 成交金额
turnover_rate float 换手率
suspension bool 是否停牌(True表示停牌)
listing_date str 上市日期 (yyyy-MM-dd)
equity_valid bool 是否正股(为true时以下正股相关字段才有合法数值)
issued_shares int 发行股本
total_market_val float 总市值
net_asset int 资产净值
net_profit int 净利润
earning_per_share float 每股盈利
outstanding_shares int 流通股本
net_asset_per_share float 每股净资产
circular_market_val float 流通市值
ey_ratio float 收益率(该字段为比例字段,默认不展示%)
pe_ratio float 市盈率(该字段为比例字段,默认不展示%)
pb_ratio float 市净率(该字段为比例字段,默认不展示%)
pe_ttm_ratio float 市盈率TTM(该字段为比例字段,默认不展示%)
stock_owner str 涡轮所属正股的代码或期权的标的股代码
wrt_valid bool 是否是窝轮(为true时以下涡轮相关的字段才有合法数据)
wrt_conversion_ratio float 换股比率(该字段为比例字段,默认不展示%)
wrt_type str 窝轮类型,参见WrtType
wrt_strike_price float 行使价格
wrt_maturity_date str 格式化窝轮到期时间
wrt_end_trade str 格式化窝轮最后交易时间
wrt_code str 窝轮对应的正股(此字段已废除,修改为stock_owner)
wrt_recovery_price float 窝轮回收价
wrt_street_vol float 窝轮街货量
wrt_issue_vol float 窝轮发行量
wrt_street_ratio float 窝轮街货占比(该字段为比例字段,默认不展示%)
wrt_delta float 窝轮对冲值
wrt_implied_volatility float 窝轮引伸波幅
wrt_premium float 窝轮溢价
lot_size int 每手股数
price_spread float 当前摆盘价差亦即摆盘数据的买档或卖档的相邻档位的报价差
option_valid bool 是否是期权(为true时以下期权相关的字段才有合法数值)
option_type str 期权类型,参见OptionType
strike_time str 行权日(美股默认是美东时间,港股A股默认是北京时间)
option_strike_price float 行权价
option_contract_size int 每份合约数
option_open_interest int 未平仓合约数
option_implied_volatility float 隐含波动率
option_premium float 溢价
option_delta float 希腊值 Delta
option_gamma float 希腊值 Gamma
option_vega float 希腊值 Vega
option_theta float 希腊值 Theta
option_rho float 希腊值 Rho
======================= ============= ==============================================================================
"""
code_list = unique_and_normalize_list(code_list)
if not code_list:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
MarketSnapshotQuery.pack_req, MarketSnapshotQuery.unpack_rsp)
kargs = {
"stock_list": code_list,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, snapshot_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
equity_col_list = ['issued_shares',
'total_market_val',
'net_asset',
'net_profit',
'earning_per_share',
'outstanding_shares',
'circular_market_val',
'net_asset_per_share',
'ey_ratio',
'pe_ratio',
'pb_ratio',
'pe_ttm_ratio'
]
wrt_col_list = ['wrt_conversion_ratio',
'wrt_type',
'wrt_strike_price',
'wrt_maturity_date',
'wrt_end_trade',
'wrt_recovery_price',
'wrt_street_vol',
'wrt_issue_vol',
'wrt_street_ratio',
'wrt_delta',
'wrt_implied_volatility',
'wrt_premium'
]
option_col_list = ['option_type',
'strike_time',
'option_strike_price',
'option_contract_size',
'option_open_interest',
'option_implied_volatility',
'option_premium',
'option_delta',
'option_gamma',
'option_vega',
'option_theta',
'option_rho'
]
col_list = [
'code',
'update_time',
'last_price',
'open_price',
'high_price',
'low_price',
'prev_close_price',
'volume',
'turnover',
'turnover_rate',
'suspension',
'listing_date',
'lot_size',
'price_spread',
'stock_owner',
'ask_price',
'bid_price',
'ask_vol',
'bid_vol'
]
col_list.append('equity_valid')
col_list.extend(equity_col_list)
col_list.append('wrt_valid')
col_list.extend(wrt_col_list)
col_list.append('option_valid')
col_list.extend(option_col_list)
snapshot_frame_table = pd.DataFrame(snapshot_list, columns=col_list)
return RET_OK, snapshot_frame_table
def get_rt_data(self, code):
"""
Get the intraday time-sharing data of the given security.
:param code: security code, e.g. 'HK.00700' or 'US.AAPL'
:return: (ret, data)
        ret == RET_OK, data is a pd.DataFrame with the columns below
        ret != RET_OK, data is an error string
=====================   ===========   ==========================================================================
Field                   Type          Description
=====================   ===========   ==========================================================================
code                    str           security code
time                    str           time (yyyy-MM-dd HH:mm:ss); US Eastern for US stocks, Beijing time for HK and A-shares by default
is_blank                bool          data status; False for real data, True for padded data
opened_mins             int           minutes elapsed since 00:00
cur_price               float         current price
last_close              float         previous close price
avg_price               float         average price
volume                  float         trade volume
turnover                float         trade turnover
=====================   ===========   ==========================================================================
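
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111; this API expects a prior RT_DATA subscription for the code, shown here as an assumption)

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ret, err = quote_ctx.subscribe(['HK.00700'], [SubType.RT_DATA])
    if ret == RET_OK:
        print(quote_ctx.get_rt_data('HK.00700'))
    quote_ctx.close()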
"""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
RtDataQuery.pack_req, RtDataQuery.unpack_rsp)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, rt_data_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
for x in rt_data_list:
x['code'] = code
col_list = [
'code', 'time', 'is_blank', 'opened_mins', 'cur_price',
'last_close', 'avg_price', 'volume', 'turnover'
]
rt_data_table = pd.DataFrame(rt_data_list, columns=col_list)
return RET_OK, rt_data_table
def get_plate_list(self, market, plate_class):
"""
Get the list of sub-plates under a plate class.
:param market: market identifier, see Market. Note that SH and SZ are not distinguished here: passing either returns the sub-plates of the combined SH/SZ market (consistent with the client).
:param plate_class: plate class, see Plate
:return: ret == RET_OK, data is a pd.DataFrame with the columns below
        ret != RET_OK, data is an error string
=====================   ===========   ==============================================================
Field                   Type          Description
=====================   ===========   ==============================================================
code                    str           plate code
plate_name              str           plate name
plate_id                str           plate id
=====================   ===========   ==============================================================
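
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111)

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ret, data = quote_ctx.get_plate_list(Market.HK, Plate.ALL)
    if ret == RET_OK:
        print(data)
    quote_ctx.close()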
"""
param_table = {'market': market, 'plate_class': plate_class}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str
if market not in MKT_MAP:
error_str = ERROR_STR_PREFIX + "the value of market param is wrong "
return RET_ERROR, error_str
if plate_class not in PLATE_CLASS_MAP:
error_str = ERROR_STR_PREFIX + "the class of plate is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
SubplateQuery.pack_req, SubplateQuery.unpack_rsp)
kargs = {
'market': market,
'plate_class': plate_class,
'conn_id': self.get_sync_conn_id()
}
ret_code, msg, subplate_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = ['code', 'plate_name', 'plate_id']
subplate_frame_table = pd.DataFrame(subplate_list, columns=col_list)
return RET_OK, subplate_frame_table
def get_plate_stock(self, plate_code):
"""
Get the list of securities in a given plate.
:param plate_code: plate code, string, e.g. 'SH.BK0001' or 'SH.BK0002'; obtain plate codes via the sub-plate list function first
:return: (ret, data)
        ret == RET_OK, data is a pd.DataFrame with the columns below
        ret != RET_OK, data is an error string
=====================   ===========   ==============================================================
Field                   Type          Description
=====================   ===========   ==============================================================
code                    str           security code
lot_size                int           shares per lot
stock_name              str           security name
stock_owner             str           code of the underlying stock
stock_child_type        str           security sub-type, see WrtType
stock_type              str           security type, see SecurityType
list_time               str           listing time (US Eastern for US stocks, Beijing time for HK and A-shares by default)
stock_id                int           security id
=====================   ===========   ==============================================================
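
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111; 'SH.BK0001' is only illustrative, take real codes from get_plate_list)

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ret, data = quote_ctx.get_plate_stock('SH.BK0001')
    if ret == RET_OK:
        print(data)
    quote_ctx.close()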
"""
if plate_code is None or is_str(plate_code) is False:
error_str = ERROR_STR_PREFIX + "the type of code is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
PlateStockQuery.pack_req, PlateStockQuery.unpack_rsp)
kargs = {
"plate_code": plate_code,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, plate_stock_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'lot_size', 'stock_name', 'stock_owner',
'stock_child_type', 'stock_type', 'list_time', 'stock_id',
]
plate_stock_table = pd.DataFrame(plate_stock_list, columns=col_list)
return RET_OK, plate_stock_table
def get_broker_queue(self, code):
"""
Get the broker queue of the given security.
:param code: security code
:return: (ret, bid_frame_table, ask_frame_table) on success, or (ret, err_message, err_message) on failure
        ret == RET_OK, the two tables are pd.DataFrame with the columns below
        ret != RET_OK, the last two items are error strings
bid_frame_table: broker bid data
=====================   ===========   ==============================================================
Field                   Type          Description
=====================   ===========   ==============================================================
code                    str           security code
bid_broker_id           int           broker id on the bid side
bid_broker_name         str           broker name on the bid side
bid_broker_pos          int           broker queue position
=====================   ===========   ==============================================================
ask_frame_table: broker ask data
=====================   ===========   ==============================================================
Field                   Type          Description
=====================   ===========   ==============================================================
code                    str           security code
ask_broker_id           int           broker id on the ask side
ask_broker_name         str           broker name on the ask side
ask_broker_pos          int           broker queue position
=====================   ===========   ==============================================================
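
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111 and a prior BROKER subscription for the code)

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ret, err = quote_ctx.subscribe(['HK.00700'], [SubType.BROKER])
    if ret == RET_OK:
        ret, bid_data, ask_data = quote_ctx.get_broker_queue('HK.00700')
        print(bid_data)
        print(ask_data)
    quote_ctx.close()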
"""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
BrokerQueueQuery.pack_req, BrokerQueueQuery.unpack_rsp)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id()
}
ret_code, ret_msg, content = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, ret_msg, ret_msg
(_, bid_list, ask_list) = content
col_bid_list = [
'code', 'bid_broker_id', 'bid_broker_name', 'bid_broker_pos'
]
col_ask_list = [
'code', 'ask_broker_id', 'ask_broker_name', 'ask_broker_pos'
]
bid_frame_table = pd.DataFrame(bid_list, columns=col_bid_list)
ask_frame_table = pd.DataFrame(ask_list, columns=col_ask_list)
return RET_OK, bid_frame_table, ask_frame_table
def _check_subscribe_param(self, code_list, subtype_list):
code_list = unique_and_normalize_list(code_list)
subtype_list = unique_and_normalize_list(subtype_list)
if len(code_list) == 0:
msg = ERROR_STR_PREFIX + 'code_list is null'
return RET_ERROR, msg, code_list, subtype_list
if len(subtype_list) == 0:
msg = ERROR_STR_PREFIX + 'subtype_list is null'
return RET_ERROR, msg, code_list, subtype_list
for subtype in subtype_list:
if subtype not in SUBTYPE_MAP:
subtype_str = ','.join([x for x in SUBTYPE_MAP])
msg = ERROR_STR_PREFIX + 'subtype is %s , which is wrong. (%s)' % (
subtype, subtype_str)
return RET_ERROR, msg, code_list, subtype_list
for code in code_list:
ret, msg = split_stock_str(code)
if ret != RET_OK:
return RET_ERROR, msg, code_list, subtype_list
return RET_OK, "", code_list, subtype_list
def subscribe(self, code_list, subtype_list, is_first_push=True, subscribe_push=True):
"""
Subscribe to real-time data: specify the securities and the data types to subscribe.
Note: len(code_list) * number of subscribed K-line types must be <= 100
:param code_list: list of security codes to subscribe
:param subtype_list: list of data types to subscribe, see SubType
:param is_first_push: whether to push the current data once right after the subscription succeeds
:param subscribe_push: whether the server should push data for this subscription (False suppresses pushes)
:return: (ret, err_message)
        ret == RET_OK, err_message is None
        ret != RET_OK, err_message is an error string
:example:
.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    print(quote_ctx.subscribe(['HK.00700'], [SubType.QUOTE]))
    quote_ctx.close()
"""
return self._subscribe_impl(code_list, subtype_list, is_first_push, subscribe_push)
def _subscribe_impl(self, code_list, subtype_list, is_first_push, subscribe_push=True):
ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list)
if ret != RET_OK:
return ret, msg
kline_sub_count = 0
for sub_type in subtype_list:
if sub_type in KLINE_SUBTYPE_LIST:
kline_sub_count += 1
# if kline_sub_count * len(code_list) > MAX_KLINE_SUB_COUNT:
# return RET_ERROR, 'Too many subscription'
query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_subscribe_req,
SubscriptionQuery.unpack_subscribe_rsp)
kargs = {
'code_list': code_list,
'subtype_list': subtype_list,
'conn_id': self.get_sync_conn_id(),
'is_first_push': is_first_push,
'subscribe_push': subscribe_push
}
ret_code, msg, _ = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
for subtype in subtype_list:
if subtype not in self._ctx_subscribe:
self._ctx_subscribe[subtype] = set()
code_set = self._ctx_subscribe[subtype]
code_set.update(code_list)
#
# ret_code, msg, push_req_str = SubscriptionQuery.pack_push_req(
# code_list, subtype_list, self.get_async_conn_id(), is_first_push)
#
# if ret_code != RET_OK:
# return RET_ERROR, msg
#
# ret_code, msg = self._send_async_req(push_req_str)
# if ret_code != RET_OK:
# return RET_ERROR, msg
return RET_OK, None
def _reconnect_subscribe(self, code_list, subtype_list):
# Separate K-line subscriptions from the other subscription types
kline_sub_list = []
other_sub_list = []
for sub in subtype_list:
if sub in KLINE_SUBTYPE_LIST:
kline_sub_list.append(sub)
else:
other_sub_list.append(sub)
# After a disconnect a large batch of codes may need resubscribing; do it in chunks to improve the success rate
kline_sub_one_size = 1
if len(kline_sub_list) > 0:
kline_sub_one_size = math.floor(100 / len(kline_sub_list))
sub_info_list = [
{"sub_list": kline_sub_list, "one_size": kline_sub_one_size},
{"sub_list": other_sub_list, "one_size": 100},
]
ret_code = RET_OK
ret_data = None
for info in sub_info_list:
sub_list = info["sub_list"]
one_size = info["one_size"]
all_count = len(code_list)
start_idx = 0
while start_idx < all_count and len(sub_list):
sub_count = one_size if start_idx + one_size <= all_count else (all_count - start_idx)
sub_codes = code_list[start_idx: start_idx + sub_count]
start_idx += sub_count
ret_code, ret_data = self._subscribe_impl(sub_codes, sub_list, False)
if ret_code != RET_OK:
break
if ret_code != RET_OK:
break
return ret_code, ret_data
def unsubscribe(self, code_list, subtype_list):
"""
Cancel subscriptions.
:param code_list: list of security codes to unsubscribe
:param subtype_list: list of data types to unsubscribe, see SubType
:return: (ret, err_message)
        ret == RET_OK, err_message is None
        ret != RET_OK, err_message is an error string
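
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111 and an existing QUOTE subscription for the code; the server may restrict how soon a fresh subscription can be cancelled)

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    print(quote_ctx.unsubscribe(['HK.00700'], [SubType.QUOTE]))
    quote_ctx.close()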
"""
ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list)
if ret != RET_OK:
return ret, msg
query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_unsubscribe_req,
SubscriptionQuery.unpack_unsubscribe_rsp)
kargs = {
'code_list': code_list,
'subtype_list': subtype_list,
"conn_id": self.get_sync_conn_id()
}
for subtype in subtype_list:
if subtype not in self._ctx_subscribe:
continue
code_set = self._ctx_subscribe[subtype]
for code in code_list:
if code not in code_set:
continue
code_set.remove(code)
ret_code, msg, _ = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
ret_code, msg, unpush_req_str = SubscriptionQuery.pack_unpush_req(code_list, subtype_list, self.get_async_conn_id())
if ret_code != RET_OK:
return RET_ERROR, msg
ret_code, msg = self._send_async_req(unpush_req_str)
if ret_code != RET_OK:
return RET_ERROR, msg
return RET_OK, None
def query_subscription(self, is_all_conn=True):
"""
Query the real-time data already subscribed.
:param is_all_conn: whether to return the subscription state of all connections; omitted or False returns only the current connection's data
:return: (ret, data)
        ret != RET_OK, data is an error string
        ret == RET_OK, data is a dict describing the subscriptions, laid out as follows:
        {
            'total_used': 4,    # subscription quota used by all connections
            'own_used': 0,      # subscription quota used by the current connection
            'remain': 496,      # remaining subscription quota
            'sub_list':         # subscribed codes per subscription type
            {
                'BROKER': ['HK.00700', 'HK.02318'],
                'RT_DATA': ['HK.00700', 'HK.02318']
            }
        }
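
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111)

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    quote_ctx.subscribe(['HK.00700'], [SubType.QUOTE])
    print(quote_ctx.query_subscription())
    quote_ctx.close()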
"""
is_all_conn = bool(is_all_conn)
query_processor = self._get_sync_query_processor(
SubscriptionQuery.pack_subscription_query_req,
SubscriptionQuery.unpack_subscription_query_rsp)
kargs = {
"is_all_conn": is_all_conn,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, sub_table = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
ret_dict = {}
ret_dict['total_used'] = sub_table['total_used']
ret_dict['remain'] = sub_table['remain']
ret_dict['own_used'] = 0
ret_dict['sub_list'] = {}
for conn_sub in sub_table['conn_sub_list']:
is_own_conn = conn_sub['is_own_conn']
if is_own_conn:
ret_dict['own_used'] = conn_sub['used']
if not is_all_conn and not is_own_conn:
continue
for sub_info in conn_sub['sub_list']:
subtype = sub_info['subtype']
if subtype not in ret_dict['sub_list']:
ret_dict['sub_list'][subtype] = []
code_list = ret_dict['sub_list'][subtype]
for code in sub_info['code_list']:
if code not in code_list:
code_list.append(code)
return RET_OK, ret_dict
def get_stock_quote(self, code_list):
"""
Get real-time quotes of subscribed securities; subscription limits apply.
For asynchronous pushes, see StockQuoteHandlerBase.
:param code_list: list of security codes; every code in code_list must be subscribed successfully before calling
:return: (ret, data)
        ret == RET_OK, data is a pd.DataFrame with the columns below
        ret != RET_OK, data is an error string
=====================   ===========   ==============================================================
Field                   Type          Description
=====================   ===========   ==============================================================
code                    str           security code
data_date               str           date
data_time               str           time (US Eastern for US stocks, Beijing time for HK and A-shares by default)
last_price              float         latest price
open_price              float         open price of the day
high_price              float         highest price
low_price               float         lowest price
prev_close_price        float         previous close price
volume                  int           trade volume
turnover                float         trade turnover
turnover_rate           float         turnover rate
amplitude               int           amplitude
suspension              bool          whether trading is suspended (True means suspended)
listing_date            str           listing date (yyyy-MM-dd)
price_spread            float         current price spread, i.e. the price gap between adjacent bid or ask levels of the order book
dark_status             str           dark-market trading status, see DarkStatus
strike_price            float         strike price
contract_size           int           contract size
open_interest           int           open interest
implied_volatility      float         implied volatility
premium                 float         premium
delta                   float         greek Delta
gamma                   float         greek Gamma
vega                    float         greek Vega
theta                   float         greek Theta
rho                     float         greek Rho
=====================   ===========   ==============================================================
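
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111; the code must be subscribed with SubType.QUOTE first)

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ret, err = quote_ctx.subscribe(['HK.00700'], [SubType.QUOTE])
    if ret == RET_OK:
        print(quote_ctx.get_stock_quote(['HK.00700']))
    quote_ctx.close()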
"""
code_list = unique_and_normalize_list(code_list)
if not code_list:
error_str = ERROR_STR_PREFIX + "the type of code_list param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
StockQuoteQuery.pack_req,
StockQuoteQuery.unpack_rsp,
)
kargs = {
"stock_list": code_list,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, quote_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'data_date', 'data_time', 'last_price', 'open_price',
'high_price', 'low_price', 'prev_close_price', 'volume',
'turnover', 'turnover_rate', 'amplitude', 'suspension',
'listing_date', 'price_spread', 'dark_status', 'strike_price',
'contract_size', 'open_interest', 'implied_volatility',
'premium', 'delta', 'gamma', 'vega', 'theta', 'rho'
]
quote_frame_table = pd.DataFrame(quote_list, columns=col_list)
return RET_OK, quote_frame_table
def get_rt_ticker(self, code, num=500):
"""
Get the real-time tick-by-tick trades of the given security; returns the most recent num ticks.
:param code: security code
:param num: number of recent ticks (capped; at most the latest 1000)
:return: (ret, data)
        ret == RET_OK, data is a pd.DataFrame with the columns below
        ret != RET_OK, data is an error string
=====================   ===========   ==============================================================
Field                   Type          Description
=====================   ===========   ==============================================================
code                    str           security code
sequence                int           tick sequence number
time                    str           trade time (US Eastern for US stocks, Beijing time for HK and A-shares by default)
price                   float         trade price
volume                  int           trade volume (in shares)
turnover                float         trade turnover
ticker_direction        str           tick direction
type                    str           tick type, see TickerType
=====================   ===========   ==============================================================
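
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111 and a prior TICKER subscription for the code)

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ret, err = quote_ctx.subscribe(['HK.00700'], [SubType.TICKER])
    if ret == RET_OK:
        print(quote_ctx.get_rt_ticker('HK.00700', 10))
    quote_ctx.close()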
"""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
if num is None or isinstance(num, int) is False:
error_str = ERROR_STR_PREFIX + "the type of num param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
TickerQuery.pack_req,
TickerQuery.unpack_rsp,
)
kargs = {
"code": code,
"num": num,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, ticker_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'time', 'price', 'volume', 'turnover', "ticker_direction",
'sequence', 'type'
]
ticker_frame_table = pd.DataFrame(ticker_list, columns=col_list)
return RET_OK, ticker_frame_table
def get_cur_kline(self, code, num, ktype=SubType.K_DAY, autype=AuType.QFQ):
"""
Get the latest num candlesticks of the given security in real time; at most 1000.
:param code: security code
:param num: number of candlesticks
:param ktype: K-line type, see KLType
:param autype: adjustment type, see AuType
:return: (ret, data)
        ret == RET_OK, data is a pd.DataFrame with the columns below
        ret != RET_OK, data is an error string
=====================   ===========   ==============================================================
Field                   Type          Description
=====================   ===========   ==============================================================
code                    str           security code
time_key                str           time (US Eastern for US stocks, Beijing time for HK and A-shares by default)
open                    float         open price
close                   float         close price
high                    float         highest price
low                     float         lowest price
volume                  int           trade volume
turnover                float         trade turnover
pe_ratio                float         P/E ratio (ratio field, not rendered as %)
turnover_rate           float         turnover rate
=====================   ===========   ==============================================================
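
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111 and a prior K_DAY subscription for the code)

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ret, err = quote_ctx.subscribe(['HK.00700'], [SubType.K_DAY])
    if ret == RET_OK:
        print(quote_ctx.get_cur_kline('HK.00700', 10, SubType.K_DAY, AuType.QFQ))
    quote_ctx.close()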
"""
param_table = {'code': code, 'ktype': ktype}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str
if num is None or isinstance(num, int) is False:
error_str = ERROR_STR_PREFIX + "the type of num param is wrong"
return RET_ERROR, error_str
if autype is not None and is_str(autype) is False:
error_str = ERROR_STR_PREFIX + "the type of autype param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
CurKlineQuery.pack_req,
CurKlineQuery.unpack_rsp,
)
kargs = {
"code": code,
"num": num,
"ktype": ktype,
"autype": autype,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, kline_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'time_key', 'open', 'close', 'high', 'low', 'volume',
'turnover', 'pe_ratio', 'turnover_rate'
]
kline_frame_table = pd.DataFrame(kline_list, columns=col_list)
return RET_OK, kline_frame_table
def get_order_book(self, code):
"""
Get the real-time order book.
:param code: security code
:return: (ret, data)
        ret == RET_OK, data is a dict with the layout below
        ret != RET_OK, data is an error string
        {'code': security code
         'Ask': [ (ask_price1, ask_volume1, order_num), (ask_price2, ask_volume2, order_num), ...]
         'Bid': [ (bid_price1, bid_volume1, order_num), (bid_price2, bid_volume2, order_num), ...]
        }
        'Ask' is the sell side and 'Bid' the buy side; each tuple is (order price, order volume, number of orders)
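
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111 and a prior ORDER_BOOK subscription for the code)

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ret, err = quote_ctx.subscribe(['HK.00700'], [SubType.ORDER_BOOK])
    if ret == RET_OK:
        print(quote_ctx.get_order_book('HK.00700'))
    quote_ctx.close()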
"""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
OrderBookQuery.pack_req,
OrderBookQuery.unpack_rsp,
)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, orderbook = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
return RET_OK, orderbook
def get_multi_points_history_kline(self,
code_list,
dates,
fields,
ktype=KLType.K_DAY,
autype=AuType.QFQ,
no_data_mode=KLNoDataMode.FORWARD):
'''
Get the specified data columns at multiple time points for multiple securities from local history K-line data.
:param code_list: one or more security codes, 'HK.00700' or ['HK.00700', 'HK.00001']
:param dates: one or more dates, '2017-01-01' or ['2017-01-01', '2017-01-02']
:param fields: one or more data columns, KL_FIELD.ALL or [KL_FIELD.DATE_TIME, KL_FIELD.OPEN]
:param ktype: K-line type
:param autype: adjustment type
:param no_data_mode: how to pick the K-line value when the requested date is a non-trading day, see KLNoDataMode
:return: (ret, data)
        ret == RET_OK, data is a pd.DataFrame whose fixed header columns are 'code', 'time_point' (the requested date) and 'data_status' (KLDataStatus); the data columns are listed below
        ret != RET_OK, data is an error string
=================   ===========   ==============================================================================
Field               Type          Description
=================   ===========   ==============================================================================
code                str           security code
time_point          str           requested time point
data_status         str           whether the data point is valid, see KLDataStatus
time_key            str           K-line time (US Eastern for US stocks, Beijing time for HK and A-shares by default)
open                float         open price
close               float         close price
high                float         highest price
low                 float         lowest price
pe_ratio            float         P/E ratio (ratio field, not rendered as %)
turnover_rate       float         turnover rate
volume              int           trade volume
turnover            float         trade turnover
change_rate         float         change rate
last_close          float         previous close price
=================   ===========   ==============================================================================
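
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111; the code and date are only illustrative)

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ret, data = quote_ctx.get_multi_points_history_kline(
        ['HK.00700'], ['2017-06-20'], KL_FIELD.ALL, KLType.K_DAY, AuType.QFQ)
    if ret == RET_OK:
        print(data)
    quote_ctx.close()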
'''
req_codes = unique_and_normalize_list(code_list)
if not code_list:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
req_dates = unique_and_normalize_list(dates)
if not dates:
error_str = ERROR_STR_PREFIX + "the type of dates param is wrong"
return RET_ERROR, error_str
req_fields = unique_and_normalize_list(fields)
if not fields:
req_fields = copy(KL_FIELD.ALL_REAL)
req_fields = KL_FIELD.normalize_field_list(req_fields)
if not req_fields:
error_str = ERROR_STR_PREFIX + "the type of fields param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
MultiPointsHisKLine.pack_req, MultiPointsHisKLine.unpack_rsp)
# Request data for at most 50 codes per round trip
max_req_code_num = 50
data_finish = False
list_ret = []
# Loop over requests to avoid timeouts from fetching too much at once
while not data_finish:
logger.debug('get_multi_points_history_kline - wait ... %s' % datetime.now())
kargs = {
"code_list": req_codes,
"dates": req_dates,
"fields": copy(req_fields),
"ktype": ktype,
"autype": autype,
"max_req": max_req_code_num,
"no_data_mode": int(no_data_mode),
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, content = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
list_kline, has_next = content
data_finish = (not has_next)
for dict_item in list_kline:
item_code = dict_item['code']
list_ret.append(dict_item)
if item_code in req_codes:
req_codes.remove(item_code)
if 0 == len(req_codes):
data_finish = True
# Fixed header columns
col_list = ['code', 'time_point', 'data_status']
for field in req_fields:
str_field = KL_FIELD.DICT_KL_FIELD_STR[field]
if str_field not in col_list:
col_list.append(str_field)
pd_frame = pd.DataFrame(list_ret, columns=col_list)
return RET_OK, pd_frame
def get_referencestock_list(self, code, reference_type):
"""
Get securities related to the given security.
:param code: security id, str, e.g. HK.00700
:param reference_type: the kind of related data to fetch, see SecurityReferenceType; e.g. WARRANT returns the warrants of the underlying stock
:return: (ret, data)
        ret == RET_OK, data is a pd.DataFrame with the columns below
        ret != RET_OK, data is an error string
=================   ===========   ==============================================================================
Field               Type          Description
=================   ===========   ==============================================================================
code                str           security code
lot_size            int           shares per lot
stock_type          str           security type, see SecurityType
stock_name          str           security name
list_time           str           listing time (US Eastern for US stocks, Beijing time for HK and A-shares by default)
wrt_valid           bool          whether this is a warrant; the wrt fields below are valid only when True
wrt_type            str           warrant type, see WrtType
wrt_code            str           underlying stock of the warrant
=================   ===========   ==============================================================================
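
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111); lists the warrants issued on the underlying

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ret, data = quote_ctx.get_referencestock_list('HK.00700', SecurityReferenceType.WARRANT)
    if ret == RET_OK:
        print(data)
    quote_ctx.close()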
"""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
StockReferenceList.pack_req,
StockReferenceList.unpack_rsp,
)
kargs = {
"code": code,
'ref_type': reference_type,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, data_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'lot_size', 'stock_type', 'stock_name', 'list_time', 'wrt_valid', 'wrt_type', 'wrt_code'
]
pd_frame = pd.DataFrame(data_list, columns=col_list)
return RET_OK, pd_frame
def get_owner_plate(self, code_list):
"""
Get the owning-plate information of one or more securities.
:param code_list: list of security codes; only equities and indexes are supported. list or str, e.g. ['HK.00700', 'HK.00001'] or 'HK.00700,HK.00001'.
:return: (ret, data)
        ret == RET_OK, data is a pd.DataFrame with the columns below
        ret != RET_OK, data is an error string
=====================   ===========   ==============================================================
Field                   Type          Description
=====================   ===========   ==============================================================
code                    str           security code
plate_code              str           plate code
plate_name              str           plate name
plate_type              str           plate type (industry or concept plate), futu.common.constant.Plate
=====================   ===========   ==============================================================
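
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111)

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ret, data = quote_ctx.get_owner_plate(['HK.00700'])
    if ret == RET_OK:
        print(data)
    quote_ctx.close()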
"""
if is_str(code_list):
code_list = code_list.split(',')
elif isinstance(code_list, list):
pass
else:
return RET_ERROR, "code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'"
code_list = unique_and_normalize_list(code_list)
for code in code_list:
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
OwnerPlateQuery.pack_req, OwnerPlateQuery.unpack_rsp)
kargs = {
"code_list": code_list,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, owner_plate_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'plate_code', 'plate_name', 'plate_type'
]
owner_plate_table = pd.DataFrame(owner_plate_list, columns=col_list)
return RET_OK, owner_plate_table
def get_holding_change_list(self, code, holder_type, start=None, end=None):
"""
Get the shareholding-change list of major holders; only US market data is provided.
:param code: stock code, e.g. 'US.AAPL'
:param holder_type: holder category, see StockHolder
:param start: start date, e.g. '2016-10-01'
:param end: end date, e.g. '2017-10-01'.
        Combinations of start and end:
        ==========   ==========   ========================================
        start        end          Description
        ==========   ==========   ========================================
        str          str          start and end are the given dates
        None         str          start is 365 days before end
        str          None         end is 365 days after start
        None         None         end is the current date, start is 365 days before end
        ==========   ==========   ========================================
:return: (ret, data)
        ret == RET_OK, data is a pd.DataFrame with the columns below
        ret != RET_OK, data is an error string
=====================   ===========   ==============================================================
Field                   Type          Description
=====================   ===========   ==============================================================
holder_name             str           holder (executive) name
holding_qty             float         shares held
holding_ratio           float         holding ratio (ratio field, not rendered as %)
change_qty              float         change in shares
change_ratio            float         change ratio (ratio field, not rendered as %)
time                    str           publication time (US Eastern by default for US stocks)
=====================   ===========   ==============================================================
"""
holder_type = STOCK_HOLDER_CLASS_MAP[holder_type]
if code is None or is_str(code) is False:
msg = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, msg
if holder_type < 1 or holder_type > len(STOCK_HOLDER_CLASS_MAP):
msg = ERROR_STR_PREFIX + "the type {0} is wrong, total number of types is {1}".format(holder_type, len(STOCK_HOLDER_CLASS_MAP))
return RET_ERROR, msg
ret_code, msg, start, end = normalize_start_end_date(start, end, delta_days=365)
if ret_code != RET_OK:
return ret_code, msg
query_processor = self._get_sync_query_processor(
HoldingChangeList.pack_req, HoldingChangeList.unpack_rsp)
kargs = {
"code": code,
"holder_type": holder_type,
"conn_id": self.get_sync_conn_id(),
"start_date": start,
"end_date": end
}
ret_code, msg, owner_plate_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'holder_name', 'holding_qty', 'holding_ratio', 'change_qty', 'change_ratio', 'time'
]
holding_change_list = pd.DataFrame(owner_plate_list, columns=col_list)
return RET_OK, holding_change_list
def get_option_chain(self, code, start=None, end=None, option_type=OptionType.ALL, option_cond_type=OptionCondType.ALL):
"""
Query options by their underlying security.
:param code: stock code, e.g. 'HK.02318'
:param start: start date; refers to the expiry date, e.g. '2017-08-01'
:param end: end date (inclusive); refers to the expiry date, e.g. '2017-08-30'. Note the range spans at most 30 days.
        Combinations of start and end:
        ==========   ==========   ========================================
        start        end          Description
        ==========   ==========   ========================================
        str          str          start and end are the given dates
        None         str          start is 30 days before end
        str          None         end is 30 days after start
        None         None         start is the current date, end is 30 days later
        ==========   ==========   ========================================
:param option_type: option type, defaults to all; all/call/put, see futu.common.constant.OptionType
:param option_cond_type: defaults to all; all/in-the-money/out-of-the-money, see futu.common.constant.OptionCondType
:return: (ret, data)
        ret == RET_OK, data is a pd.DataFrame with the columns below
        ret != RET_OK, data is an error string
==================   ===========   ==============================================================
Field                Type          Description
==================   ===========   ==============================================================
code                 str           security code
name                 str           name
lot_size             int           shares per lot
stock_type           str           security type, see SecurityType
option_type          str           option type, Qot_Common.OptionType
stock_owner          str           underlying stock
strike_time          str           exercise date (US Eastern for US stocks, Beijing time for HK and A-shares by default)
strike_price         float         strike price
suspension           bool          whether trading is suspended (True means suspended)
stock_id             int           security id
==================   ===========   ==============================================================
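
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111; the code and dates are only illustrative)

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ret, data = quote_ctx.get_option_chain('HK.00700', start='2017-08-01', end='2017-08-30')
    if ret == RET_OK:
        print(data)
    quote_ctx.close()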
"""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
ret_code, msg, start, end = normalize_start_end_date(start, end, delta_days=29, default_time_end='00:00:00', prefer_end_now=False)
if ret_code != RET_OK:
return ret_code, msg
query_processor = self._get_sync_query_processor(
OptionChain.pack_req, OptionChain.unpack_rsp)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id(),
"start_date": start,
"end_date": end,
"option_cond_type": option_cond_type,
"option_type": option_type
}
ret_code, msg, option_chain_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'name', 'lot_size', 'stock_type',
'option_type', 'stock_owner', 'strike_time', 'strike_price', 'suspension',
'stock_id'
]
option_chain = pd.DataFrame(option_chain_list, columns=col_list)
option_chain.sort_values(by=["strike_time", "strike_price"], axis=0, ascending=True, inplace=True)
option_chain.index = range(len(option_chain))
return RET_OK, option_chain
def get_order_detail(self, code):
"""
Query the order detail provided under A-share Level 2 permission.
NOTE: this service has been cancelled, so the method returns an error
immediately and the legacy implementation kept below is unreachable.
:param code: stock code, e.g. 'HK.02318'
:return: (ret, data)
        ret == RET_OK, data is a dict with the layout below
        ret != RET_OK, data is an error string
        {'code': stock code
         'Ask': [ order_num, [order_volume1, order_volume2] ]
         'Bid': [ order_num, [order_volume1, order_volume2] ]
        }
        'Ask' is the sell side and 'Bid' the buy side. order_num is the number of
        queued orders and order_volume is the volume of each order; at most the
        first 50 order volumes are returned, so order_num may exceed the number
        of volumes listed.
"""
return RET_ERROR, "this service has been cancelled"
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
OrderDetail.pack_req, OrderDetail.unpack_rsp)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, order_detail = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
return RET_OK, order_detail
def get_warrant(self, stock_owner='', req=None):
"""
:param stock_owner: code of the underlying stock of the warrants
:param req: futu.quote.quote_get_warrant.Request
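
:example: a minimal sketch (assumes OpenD on 127.0.0.1:11111); on success the second item is a tuple (warrant DataFrame, last_page, all_count)

.. code:: python

    from futu import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ret, data = quote_ctx.get_warrant(stock_owner='HK.00700')
    if ret == RET_OK:
        warrant_df, last_page, all_count = data
        print(warrant_df)
    quote_ctx.close()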
"""
from futu.quote.quote_get_warrant import Request
if (req is None) or (not isinstance(req, Request)):
req = Request()
if stock_owner is not None:
req.stock_owner = stock_owner
query_processor = self._get_sync_query_processor(QuoteWarrant.pack_req, QuoteWarrant.unpack_rsp)
kargs = {
"req": req,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, content = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, msg
else:
warrant_data_list, last_page, all_count = content
col_list = ['stock', 'name', 'stock_owner', 'type', 'issuer', 'maturity_time',
'list_time', 'last_trade_time', 'recovery_price', 'conversion_ratio',
'lot_size', 'strike_price', 'last_close_price', 'cur_price', 'price_change_val', 'change_rate',
'status', 'bid_price', 'ask_price', 'bid_vol', 'ask_vol', 'volume', 'turnover', 'score',
'premium', 'break_even_point', 'leverage', 'ipop', 'price_recovery_ratio', 'conversion_price',
'street_rate', 'street_vol', 'amplitude', 'issue_size', 'high_price', 'low_price',
'implied_volatility', 'delta', 'effective_leverage', 'list_timestamp', 'last_trade_timestamp',
'maturity_timestamp']
warrant_data_frame = pd.DataFrame(warrant_data_list, columns=col_list)
return ret_code, (warrant_data_frame, last_page, all_count)
| 41.841547 | 184 | 0.45184 |
import datetime
import math
from time import sleep
import pandas as pd
from futu.common.open_context_base import OpenContextBase, ContextStatus
from futu.quote.quote_query import *
class OpenQuoteContext(OpenContextBase):
def __init__(self, host='127.0.0.1', port=11111):
self._ctx_subscribe = {}
super(OpenQuoteContext, self).__init__(host, port, True)
def close(self):
super(OpenQuoteContext, self).close()
def on_api_socket_reconnected(self):
resub_count = 0
subtype_list = []
code_list = []
resub_dict = copy(self._ctx_subscribe)
subtype_all_cnt = len(resub_dict.keys())
subtype_cur_cnt = 0
ret_code = RET_OK
ret_msg = ''
for subtype in resub_dict.keys():
subtype_cur_cnt += 1
code_set = resub_dict[subtype]
code_list_new = [code for code in code_set]
if len(code_list_new) == 0:
continue
if len(code_list) == 0:
code_list = code_list_new
subtype_list = [subtype]
is_need_sub = False
if code_list == code_list_new:
if subtype not in subtype_list:
subtype_list.append(subtype) else:
ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)
logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(
len(code_list), ret_code, ret_msg, subtype_list, code_list))
if ret_code != RET_OK:
break
resub_count += len(code_list)
code_list = code_list_new
subtype_list = [subtype]
if subtype_cur_cnt == subtype_all_cnt and len(code_list):
ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)
logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(len(code_list), ret_code, ret_msg, subtype_list, code_list))
if ret_code != RET_OK:
break
resub_count += len(code_list)
code_list = []
subtype_list = []
logger.debug("reconnect subscribe all code_count={} ret_code={} ret_msg={}".format(resub_count, ret_code, ret_msg))
if ret_code != RET_OK:
logger.error("reconnect subscribe error, close connect and retry!!")
self._status = ContextStatus.Start
self._wait_reconnect()
return ret_code, ret_msg
def get_trading_days(self, market, start=None, end=None):
if market is None or is_str(market) is False:
error_str = ERROR_STR_PREFIX + "the type of market param is wrong"
return RET_ERROR, error_str
ret, msg, start, end = normalize_start_end_date(start, end, 365)
if ret != RET_OK:
return ret, msg
query_processor = self._get_sync_query_processor(
TradeDayQuery.pack_req, TradeDayQuery.unpack_rsp)
kargs = {
'market': market,
'start_date': start,
'end_date': end,
'conn_id': self.get_sync_conn_id()
}
ret_code, msg, trade_day_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
return RET_OK, trade_day_list
def get_stock_basicinfo(self, market, stock_type=SecurityType.STOCK, code_list=None):
param_table = {'market': market, 'stock_type': stock_type}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str
if code_list is not None:
if is_str(code_list):
code_list = code_list.split(',')
elif isinstance(code_list, list):
pass
else:
return RET_ERROR, "code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'"
query_processor = self._get_sync_query_processor(
StockBasicInfoQuery.pack_req, StockBasicInfoQuery.unpack_rsp)
kargs = {
"market": market,
'stock_type': stock_type,
'code_list': code_list,
'conn_id': self.get_sync_conn_id()
}
ret_code, msg, basic_info_list = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, msg
col_list = [
'code', 'name', 'lot_size', 'stock_type', 'stock_child_type', 'stock_owner',
'option_type', 'strike_time', 'strike_price', 'suspension',
'listing_date', 'stock_id', 'delisting'
]
basic_info_table = pd.DataFrame(basic_info_list, columns=col_list)
return RET_OK, basic_info_table
def get_multiple_history_kline(self,
codelist,
start=None,
end=None,
ktype=KLType.K_DAY,
autype=AuType.QFQ):
if is_str(codelist):
codelist = codelist.split(',')
elif isinstance(codelist, list):
pass
else:
return RET_ERROR, "code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'"
result = []
for code in codelist:
ret, data = self.get_history_kline(code, start, end, ktype, autype)
if ret != RET_OK:
return RET_ERROR, 'get history kline error: {}, {},{},{},{}'.format(data, code, start, end, ktype)
result.append(data)
return 0, result
def _get_history_kline_impl(self,
query_cls,
code,
start=None,
end=None,
ktype=KLType.K_DAY,
autype=AuType.QFQ,
fields=[KL_FIELD.ALL]
):
ret, msg, req_start, end = normalize_start_end_date(start, end, 365)
if ret != RET_OK:
return ret, msg
req_fields = unique_and_normalize_list(fields)
if not fields:
req_fields = copy(KL_FIELD.ALL_REAL)
req_fields = KL_FIELD.normalize_field_list(req_fields)
if not req_fields:
error_str = ERROR_STR_PREFIX + "the type of fields param is wrong"
return RET_ERROR, error_str
if autype is None:
autype = 'None'
param_table = {'code': code, 'ktype': ktype, 'autype': autype}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str
max_kl_num = 1000
data_finish = False
list_ret = []
while not data_finish:
kargs = {
"code": code,
"start_date": req_start,
"end_date": end,
"ktype": ktype,
"autype": autype,
"fields": copy(req_fields),
"max_num": max_kl_num,
"conn_id": self.get_sync_conn_id()
}
query_processor = self._get_sync_query_processor(query_cls.pack_req, query_cls.unpack_rsp)
ret_code, msg, content = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, msg
list_kline, has_next, next_time = content
data_finish = (not has_next) or (not next_time)
req_start = next_time
for dict_item in list_kline:
list_ret.append(dict_item)
col_list = ['code']
for field in req_fields:
str_field = KL_FIELD.DICT_KL_FIELD_STR[field]
if str_field not in col_list:
col_list.append(str_field)
kline_frame_table = pd.DataFrame(list_ret, columns=col_list)
return RET_OK, kline_frame_table
def get_history_kline(self,
code,
start=None,
end=None,
ktype=KLType.K_DAY,
autype=AuType.QFQ,
fields=[KL_FIELD.ALL]):
return self._get_history_kline_impl(GetHistoryKlineQuery, code, start=start, end=end,
ktype=ktype, autype=autype, fields=fields)
def request_history_kline(self,
code,
start=None,
end=None,
ktype=KLType.K_DAY,
autype=AuType.QFQ,
fields=[KL_FIELD.ALL],
max_count=1000,
page_req_key=None):
next_page_req_key = None
ret, msg, req_start, end = normalize_start_end_date(start, end, 365)
if ret != RET_OK:
return ret, msg, next_page_req_key
req_fields = unique_and_normalize_list(fields)
if not fields:
req_fields = copy(KL_FIELD.ALL_REAL)
req_fields = KL_FIELD.normalize_field_list(req_fields)
if not req_fields:
error_str = ERROR_STR_PREFIX + "the type of fields param is wrong"
return RET_ERROR, error_str, next_page_req_key
if autype is None:
autype = 'None'
param_table = {'code': code, 'ktype': ktype, 'autype': autype}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str, next_page_req_key
max_kl_num = min(1000, max_count) if max_count is not None else 1000
data_finish = False
list_ret = []
while not data_finish:
kargs = {
"code": code,
"start_date": req_start,
"end_date": end,
"ktype": ktype,
"autype": autype,
"fields": copy(req_fields),
"max_num": max_kl_num,
"conn_id": self.get_sync_conn_id(),
"next_req_key": page_req_key
}
query_processor = self._get_sync_query_processor(RequestHistoryKlineQuery.pack_req,
RequestHistoryKlineQuery.unpack_rsp)
ret_code, msg, content = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, msg, next_page_req_key
list_kline, has_next, page_req_key = content
list_ret.extend(list_kline)
next_page_req_key = page_req_key
if max_count is not None:
if max_count > len(list_ret) and has_next:
data_finish = False
max_kl_num = min(max_count - len(list_ret), 1000)
else:
data_finish = True
else:
data_finish = not has_next
col_list = ['code']
for field in req_fields:
str_field = KL_FIELD.DICT_KL_FIELD_STR[field]
if str_field not in col_list:
col_list.append(str_field)
kline_frame_table = pd.DataFrame(list_ret, columns=col_list)
return RET_OK, kline_frame_table, next_page_req_key
def get_autype_list(self, code_list):
code_list = unique_and_normalize_list(code_list)
for code in code_list:
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
ExrightQuery.pack_req, ExrightQuery.unpack_rsp)
kargs = {
"stock_list": code_list,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, exr_record = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'ex_div_date', 'split_ratio', 'per_cash_div',
'per_share_div_ratio', 'per_share_trans_ratio', 'allotment_ratio',
'allotment_price', 'stk_spo_ratio', 'stk_spo_price',
'forward_adj_factorA', 'forward_adj_factorB',
'backward_adj_factorA', 'backward_adj_factorB'
]
exr_frame_table = pd.DataFrame(exr_record, columns=col_list)
return RET_OK, exr_frame_table
def get_market_snapshot(self, code_list):
code_list = unique_and_normalize_list(code_list)
if not code_list:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
MarketSnapshotQuery.pack_req, MarketSnapshotQuery.unpack_rsp)
kargs = {
"stock_list": code_list,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, snapshot_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
equity_col_list = ['issued_shares',
'total_market_val',
'net_asset',
'net_profit',
'earning_per_share',
'outstanding_shares',
'circular_market_val',
'net_asset_per_share',
'ey_ratio',
'pe_ratio',
'pb_ratio',
'pe_ttm_ratio'
]
wrt_col_list = ['wrt_conversion_ratio',
'wrt_type',
'wrt_strike_price',
'wrt_maturity_date',
'wrt_end_trade',
'wrt_recovery_price',
'wrt_street_vol',
'wrt_issue_vol',
'wrt_street_ratio',
'wrt_delta',
'wrt_implied_volatility',
'wrt_premium'
]
option_col_list = ['option_type',
'strike_time',
'option_strike_price',
'option_contract_size',
'option_open_interest',
'option_implied_volatility',
'option_premium',
'option_delta',
'option_gamma',
'option_vega',
'option_theta',
'option_rho'
]
col_list = [
'code',
'update_time',
'last_price',
'open_price',
'high_price',
'low_price',
'prev_close_price',
'volume',
'turnover',
'turnover_rate',
'suspension',
'listing_date',
'lot_size',
'price_spread',
'stock_owner',
'ask_price',
'bid_price',
'ask_vol',
'bid_vol'
]
col_list.append('equity_valid')
col_list.extend(equity_col_list)
col_list.append('wrt_valid')
col_list.extend(wrt_col_list)
col_list.append('option_valid')
col_list.extend(option_col_list)
snapshot_frame_table = pd.DataFrame(snapshot_list, columns=col_list)
return RET_OK, snapshot_frame_table
def get_rt_data(self, code):
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
RtDataQuery.pack_req, RtDataQuery.unpack_rsp)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, rt_data_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
for x in rt_data_list:
x['code'] = code
col_list = [
'code', 'time', 'is_blank', 'opened_mins', 'cur_price',
'last_close', 'avg_price', 'volume', 'turnover'
]
rt_data_table = pd.DataFrame(rt_data_list, columns=col_list)
return RET_OK, rt_data_table
def get_plate_list(self, market, plate_class):
param_table = {'market': market, 'plate_class': plate_class}
for x in param_table:
param = param_table[x]
if param is None or is_str(market) is False:
error_str = ERROR_STR_PREFIX + "the type of market param is wrong"
return RET_ERROR, error_str
if market not in MKT_MAP:
error_str = ERROR_STR_PREFIX + "the value of market param is wrong "
return RET_ERROR, error_str
if plate_class not in PLATE_CLASS_MAP:
error_str = ERROR_STR_PREFIX + "the class of plate is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
SubplateQuery.pack_req, SubplateQuery.unpack_rsp)
kargs = {
'market': market,
'plate_class': plate_class,
'conn_id': self.get_sync_conn_id()
}
ret_code, msg, subplate_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = ['code', 'plate_name', 'plate_id']
subplate_frame_table = pd.DataFrame(subplate_list, columns=col_list)
return RET_OK, subplate_frame_table
def get_plate_stock(self, plate_code):
if plate_code is None or is_str(plate_code) is False:
error_str = ERROR_STR_PREFIX + "the type of code is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
PlateStockQuery.pack_req, PlateStockQuery.unpack_rsp)
kargs = {
"plate_code": plate_code,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, plate_stock_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'lot_size', 'stock_name', 'stock_owner',
'stock_child_type', 'stock_type', 'list_time', 'stock_id',
]
plate_stock_table = pd.DataFrame(plate_stock_list, columns=col_list)
return RET_OK, plate_stock_table
def get_broker_queue(self, code):
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
BrokerQueueQuery.pack_req, BrokerQueueQuery.unpack_rsp)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id()
}
ret_code, ret_msg, content = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, ret_msg, ret_msg
(_, bid_list, ask_list) = content
col_bid_list = [
'code', 'bid_broker_id', 'bid_broker_name', 'bid_broker_pos'
]
col_ask_list = [
'code', 'ask_broker_id', 'ask_broker_name', 'ask_broker_pos'
]
bid_frame_table = pd.DataFrame(bid_list, columns=col_bid_list)
ask_frame_table = pd.DataFrame(ask_list, columns=col_ask_list)
return RET_OK, bid_frame_table, ask_frame_table
def _check_subscribe_param(self, code_list, subtype_list):
code_list = unique_and_normalize_list(code_list)
subtype_list = unique_and_normalize_list(subtype_list)
if len(code_list) == 0:
msg = ERROR_STR_PREFIX + 'code_list is null'
return RET_ERROR, msg, code_list, subtype_list
if len(subtype_list) == 0:
msg = ERROR_STR_PREFIX + 'subtype_list is null'
return RET_ERROR, msg, code_list, subtype_list
for subtype in subtype_list:
if subtype not in SUBTYPE_MAP:
subtype_str = ','.join([x for x in SUBTYPE_MAP])
msg = ERROR_STR_PREFIX + 'subtype is %s , which is wrong. (%s)' % (
subtype, subtype_str)
return RET_ERROR, msg, code_list, subtype_list
for code in code_list:
ret, msg = split_stock_str(code)
if ret != RET_OK:
return RET_ERROR, msg, code_list, subtype_list
return RET_OK, "", code_list, subtype_list
def subscribe(self, code_list, subtype_list, is_first_push=True, subscribe_push=True):
return self._subscribe_impl(code_list, subtype_list, is_first_push, subscribe_push)
def _subscribe_impl(self, code_list, subtype_list, is_first_push, subscribe_push=True):
ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list)
if ret != RET_OK:
return ret, msg
kline_sub_count = 0
for sub_type in subtype_list:
if sub_type in KLINE_SUBTYPE_LIST:
kline_sub_count += 1
query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_subscribe_req,
SubscriptionQuery.unpack_subscribe_rsp)
kargs = {
'code_list': code_list,
'subtype_list': subtype_list,
'conn_id': self.get_sync_conn_id(),
'is_first_push': is_first_push,
'subscribe_push': subscribe_push
}
ret_code, msg, _ = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
for subtype in subtype_list:
if subtype not in self._ctx_subscribe:
self._ctx_subscribe[subtype] = set()
code_set = self._ctx_subscribe[subtype]
code_set.update(code_list)
return RET_OK, None
def _reconnect_subscribe(self, code_list, subtype_list):
kline_sub_list = []
other_sub_list = []
for sub in subtype_list:
if sub in KLINE_SUBTYPE_LIST:
kline_sub_list.append(sub)
else:
other_sub_list.append(sub)
kline_sub_one_size = 1
if len(kline_sub_list) > 0:
kline_sub_one_size = math.floor(100 / len(kline_sub_list))
sub_info_list = [
{"sub_list": kline_sub_list, "one_size": kline_sub_one_size},
{"sub_list": other_sub_list, "one_size": 100},
]
ret_code = RET_OK
ret_data = None
for info in sub_info_list:
sub_list = info["sub_list"]
one_size = info["one_size"]
all_count = len(code_list)
start_idx = 0
while start_idx < all_count and len(sub_list):
sub_count = one_size if start_idx + one_size <= all_count else (all_count - start_idx)
sub_codes = code_list[start_idx: start_idx + sub_count]
start_idx += sub_count
ret_code, ret_data = self._subscribe_impl(sub_codes, sub_list, False)
if ret_code != RET_OK:
break
if ret_code != RET_OK:
break
return ret_code, ret_data
def unsubscribe(self, code_list, subtype_list):
ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list)
if ret != RET_OK:
return ret, msg
query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_unsubscribe_req,
SubscriptionQuery.unpack_unsubscribe_rsp)
kargs = {
'code_list': code_list,
'subtype_list': subtype_list,
"conn_id": self.get_sync_conn_id()
}
for subtype in subtype_list:
if subtype not in self._ctx_subscribe:
continue
code_set = self._ctx_subscribe[subtype]
for code in code_list:
if code not in code_set:
continue
code_set.remove(code)
ret_code, msg, _ = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
ret_code, msg, unpush_req_str = SubscriptionQuery.pack_unpush_req(code_list, subtype_list, self.get_async_conn_id())
if ret_code != RET_OK:
return RET_ERROR, msg
ret_code, msg = self._send_async_req(unpush_req_str)
if ret_code != RET_OK:
return RET_ERROR, msg
return RET_OK, None
def query_subscription(self, is_all_conn=True):
is_all_conn = bool(is_all_conn)
query_processor = self._get_sync_query_processor(
SubscriptionQuery.pack_subscription_query_req,
SubscriptionQuery.unpack_subscription_query_rsp)
kargs = {
"is_all_conn": is_all_conn,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, sub_table = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
ret_dict = {}
ret_dict['total_used'] = sub_table['total_used']
ret_dict['remain'] = sub_table['remain']
ret_dict['own_used'] = 0
ret_dict['sub_list'] = {}
for conn_sub in sub_table['conn_sub_list']:
is_own_conn = conn_sub['is_own_conn']
if is_own_conn:
ret_dict['own_used'] = conn_sub['used']
if not is_all_conn and not is_own_conn:
continue
for sub_info in conn_sub['sub_list']:
subtype = sub_info['subtype']
if subtype not in ret_dict['sub_list']:
ret_dict['sub_list'][subtype] = []
code_list = ret_dict['sub_list'][subtype]
for code in sub_info['code_list']:
if code not in code_list:
code_list.append(code)
return RET_OK, ret_dict
def get_stock_quote(self, code_list):
code_list = unique_and_normalize_list(code_list)
if not code_list:
error_str = ERROR_STR_PREFIX + "the type of code_list param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
StockQuoteQuery.pack_req,
StockQuoteQuery.unpack_rsp,
)
kargs = {
"stock_list": code_list,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, quote_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'data_date', 'data_time', 'last_price', 'open_price',
'high_price', 'low_price', 'prev_close_price', 'volume',
'turnover', 'turnover_rate', 'amplitude', 'suspension',
'listing_date', 'price_spread', 'dark_status', 'strike_price',
'contract_size', 'open_interest', 'implied_volatility',
'premium', 'delta', 'gamma', 'vega', 'theta', 'rho'
]
quote_frame_table = pd.DataFrame(quote_list, columns=col_list)
return RET_OK, quote_frame_table
def get_rt_ticker(self, code, num=500):
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
if num is None or isinstance(num, int) is False:
error_str = ERROR_STR_PREFIX + "the type of num param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
TickerQuery.pack_req,
TickerQuery.unpack_rsp,
)
kargs = {
"code": code,
"num": num,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, ticker_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'time', 'price', 'volume', 'turnover', "ticker_direction",
'sequence', 'type'
]
ticker_frame_table = pd.DataFrame(ticker_list, columns=col_list)
return RET_OK, ticker_frame_table
def get_cur_kline(self, code, num, ktype=SubType.K_DAY, autype=AuType.QFQ):
param_table = {'code': code, 'ktype': ktype}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str
if num is None or isinstance(num, int) is False:
error_str = ERROR_STR_PREFIX + "the type of num param is wrong"
return RET_ERROR, error_str
if autype is not None and is_str(autype) is False:
error_str = ERROR_STR_PREFIX + "the type of autype param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
CurKlineQuery.pack_req,
CurKlineQuery.unpack_rsp,
)
kargs = {
"code": code,
"num": num,
"ktype": ktype,
"autype": autype,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, kline_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'time_key', 'open', 'close', 'high', 'low', 'volume',
'turnover', 'pe_ratio', 'turnover_rate'
]
kline_frame_table = pd.DataFrame(kline_list, columns=col_list)
return RET_OK, kline_frame_table
def get_order_book(self, code):
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
OrderBookQuery.pack_req,
OrderBookQuery.unpack_rsp,
)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, orderbook = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
return RET_OK, orderbook
def get_multi_points_history_kline(self,
code_list,
dates,
fields,
ktype=KLType.K_DAY,
autype=AuType.QFQ,
no_data_mode=KLNoDataMode.FORWARD):
req_codes = unique_and_normalize_list(code_list)
if not code_list:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
req_dates = unique_and_normalize_list(dates)
if not dates:
error_str = ERROR_STR_PREFIX + "the type of dates param is wrong"
return RET_ERROR, error_str
req_fields = unique_and_normalize_list(fields)
if not fields:
req_fields = copy(KL_FIELD.ALL_REAL)
req_fields = KL_FIELD.normalize_field_list(req_fields)
if not req_fields:
error_str = ERROR_STR_PREFIX + "the type of fields param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
MultiPointsHisKLine.pack_req, MultiPointsHisKLine.unpack_rsp)
max_req_code_num = 50
data_finish = False
list_ret = []
while not data_finish:
logger.debug('get_multi_points_history_kline - wait ... %s' % datetime.now())
kargs = {
"code_list": req_codes,
"dates": req_dates,
"fields": copy(req_fields),
"ktype": ktype,
"autype": autype,
"max_req": max_req_code_num,
"no_data_mode": int(no_data_mode),
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, content = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
list_kline, has_next = content
data_finish = (not has_next)
for dict_item in list_kline:
item_code = dict_item['code']
list_ret.append(dict_item)
if item_code in req_codes:
req_codes.remove(item_code)
if 0 == len(req_codes):
data_finish = True
col_list = ['code', 'time_point', 'data_status']
for field in req_fields:
str_field = KL_FIELD.DICT_KL_FIELD_STR[field]
if str_field not in col_list:
col_list.append(str_field)
pd_frame = pd.DataFrame(list_ret, columns=col_list)
return RET_OK, pd_frame
    def get_referencestock_list(self, code, reference_type):
        """Return securities related to ``code`` (e.g. its warrants) as a DataFrame."""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
StockReferenceList.pack_req,
StockReferenceList.unpack_rsp,
)
kargs = {
"code": code,
'ref_type': reference_type,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, data_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'lot_size', 'stock_type', 'stock_name', 'list_time', 'wrt_valid', 'wrt_type', 'wrt_code'
]
pd_frame = pd.DataFrame(data_list, columns=col_list)
return RET_OK, pd_frame
    def get_owner_plate(self, code_list):
        """Return the plates (sectors) that each code in ``code_list`` belongs to."""
if is_str(code_list):
code_list = code_list.split(',')
elif isinstance(code_list, list):
pass
else:
return RET_ERROR, "code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'"
code_list = unique_and_normalize_list(code_list)
for code in code_list:
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
OwnerPlateQuery.pack_req, OwnerPlateQuery.unpack_rsp)
kargs = {
"code_list": code_list,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, owner_plate_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'plate_code', 'plate_name', 'plate_type'
]
owner_plate_table = pd.DataFrame(owner_plate_list, columns=col_list)
return RET_OK, owner_plate_table
    def get_holding_change_list(self, code, holder_type, start=None, end=None):
        """Return holding changes for ``code`` by holder class within [start, end]."""
holder_type = STOCK_HOLDER_CLASS_MAP[holder_type]
if code is None or is_str(code) is False:
msg = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, msg
if holder_type < 1 or holder_type > len(STOCK_HOLDER_CLASS_MAP):
msg = ERROR_STR_PREFIX + "the type {0} is wrong, total number of types is {1}".format(holder_type, len(STOCK_HOLDER_CLASS_MAP))
return RET_ERROR, msg
ret_code, msg, start, end = normalize_start_end_date(start, end, delta_days=365)
if ret_code != RET_OK:
return ret_code, msg
query_processor = self._get_sync_query_processor(
HoldingChangeList.pack_req, HoldingChangeList.unpack_rsp)
kargs = {
"code": code,
"holder_type": holder_type,
"conn_id": self.get_sync_conn_id(),
"start_date": start,
"end_date": end
}
ret_code, msg, owner_plate_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'holder_name', 'holding_qty', 'holding_ratio', 'change_qty', 'change_ratio', 'time'
]
holding_change_list = pd.DataFrame(owner_plate_list, columns=col_list)
return RET_OK, holding_change_list
    def get_option_chain(self, code, start=None, end=None, option_type=OptionType.ALL, option_cond_type=OptionCondType.ALL):
        """Return the option chain for ``code``, sorted by strike time then strike price."""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
ret_code, msg, start, end = normalize_start_end_date(start, end, delta_days=29, default_time_end='00:00:00', prefer_end_now=False)
if ret_code != RET_OK:
return ret_code, msg
query_processor = self._get_sync_query_processor(
OptionChain.pack_req, OptionChain.unpack_rsp)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id(),
"start_date": start,
"end_date": end,
"option_cond_type": option_cond_type,
"option_type": option_type
}
ret_code, msg, option_chain_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'name', 'lot_size', 'stock_type',
'option_type', 'stock_owner', 'strike_time', 'strike_price', 'suspension',
'stock_id'
]
option_chain = pd.DataFrame(option_chain_list, columns=col_list)
option_chain.sort_values(by=["strike_time", "strike_price"], axis=0, ascending=True, inplace=True)
option_chain.index = range(len(option_chain))
return RET_OK, option_chain
    def get_order_detail(self, code):
        # The upstream service was withdrawn; everything after this return is unreachable.
        return RET_ERROR, "this service has been cancelled"
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
OrderDetail.pack_req, OrderDetail.unpack_rsp)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, order_detail = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
return RET_OK, order_detail
    def get_warrant(self, stock_owner='', req=None):
        """Query warrants, optionally filtered by owner; returns (frame, last_page, all_count) on success."""
from futu.quote.quote_get_warrant import Request
if (req is None) or (not isinstance(req, Request)):
req = Request()
if stock_owner is not None:
req.stock_owner = stock_owner
query_processor = self._get_sync_query_processor(QuoteWarrant.pack_req, QuoteWarrant.unpack_rsp)
kargs = {
"req": req,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, content = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, msg
else:
warrant_data_list, last_page, all_count = content
col_list = ['stock', 'name', 'stock_owner', 'type', 'issuer', 'maturity_time',
'list_time', 'last_trade_time', 'recovery_price', 'conversion_ratio',
'lot_size', 'strike_price', 'last_close_price', 'cur_price', 'price_change_val', 'change_rate',
'status', 'bid_price', 'ask_price', 'bid_vol', 'ask_vol', 'volume', 'turnover', 'score',
'premium', 'break_even_point', 'leverage', 'ipop', 'price_recovery_ratio', 'conversion_price',
'street_rate', 'street_vol', 'amplitude', 'issue_size', 'high_price', 'low_price',
'implied_volatility', 'delta', 'effective_leverage', 'list_timestamp', 'last_trade_timestamp',
'maturity_timestamp']
warrant_data_frame = pd.DataFrame(warrant_data_list, columns=col_list)
return ret_code, (warrant_data_frame, last_page, all_count)
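# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Every method above follows the same (ret_code, data) convention: on RET_OK,
# ``data`` is a pandas DataFrame built from the method's ``col_list``;
# otherwise it is an error string. The context class name and the host/port
# values below are assumptions based on futu-api's usual entry point.
if __name__ == '__main__':
    from futu import OpenQuoteContext, RET_OK, SubType

    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ret, data = quote_ctx.get_cur_kline('HK.00700', num=10, ktype=SubType.K_DAY)
    if ret == RET_OK:
        print(data[['time_key', 'close']])
    else:
        print('get_cur_kline failed:', data)
    quote_ctx.close()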
| true | true |
1c4a1518d68c3a0ac4df0c03d4e9484fa9bf6c93 | 4,468 | py | Python | autocomplete_light/tests/autocomplete/generic.py | andybak/django-autocomplete-light | 19e46261a01a578d73bfae02bf772bc4d81984f9 | ["MIT"] | null | null | null | autocomplete_light/tests/autocomplete/generic.py | andybak/django-autocomplete-light | 19e46261a01a578d73bfae02bf772bc4d81984f9 | ["MIT"] | null | null | null | autocomplete_light/tests/autocomplete/generic.py | andybak/django-autocomplete-light | 19e46261a01a578d73bfae02bf772bc4d81984f9 | ["MIT"] | null | null | null |
from __future__ import unicode_literals
from .case import *
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
from ...example_apps.autocomplete_test_case_app.models import User, Group
class AutocompleteGenericMock(autocomplete_light.AutocompleteGenericBase):
choices = (
User.objects.filter(pk__lt=10),
Group.objects.filter(pk__lt=10),
)
search_fields = (
('username', 'email'),
('name',),
)
limit_choices = 3
class FormMock(forms.Form):
x = autocomplete_light.GenericModelChoiceField(
widget=autocomplete_light.ChoiceWidget(
autocomplete=AutocompleteGenericMock))
class AutocompleteGenericTestCase(AutocompleteTestCase):
autocomplete_mock = AutocompleteGenericMock
def assert_choices_equal(self, result, test):
self.assertEqual(list(result), test['expected'])
def get_choices_for_values_tests(self):
return (
{
'fixture': [
'%s-%s' % (self.user_ctype.pk, self.james.pk),
'%s-%s' % (self.group_ctype.pk, self.bluesmen.pk),
],
'expected': [
self.james,
self.bluesmen,
]
},
{
'fixture': [
'%s-%s' % (self.user_ctype.pk, self.james.pk),
'%s-%s' % (self.user_ctype.pk, self.elton.pk),
'%s-%s' % (self.group_ctype.pk, self.bluesmen.pk),
'%s-%s' % (self.group_ctype.pk, self.emos.pk),
],
'expected': [
self.james,
self.bluesmen,
],
'name': 'should ignore values that are not in the querysets',
},
)
def get_choices_for_request_tests(self):
return (
{
'fixture': make_get_request('j'),
'expected': [
self.abe,
self.rockers,
self.bluesmen,
],
},
{
'fixture': make_get_request('q=elton'),
'expected': [],
'name': 'should not propose models that are not in the qs',
},
)
def get_validate_tests(self):
return (
{
'fixture': [
'%s-%s' % (self.user_ctype.pk, self.james.pk),
'%s-%s' % (self.group_ctype.pk, self.bluesmen.pk),
'%s-%s' % (self.group_ctype.pk, self.emos.pk),
],
'expected': False,
},
{
'fixture': [
'%s-%s' % (self.user_ctype.pk, self.james.pk),
'%s-%s' % (self.group_ctype.pk, self.bluesmen.pk),
],
'expected': True,
},
{
'fixture': [],
'expected': True,
},
{
'fixture': ['bla'],
'expected': False,
},
{
'fixture': ['123123-123123'],
'expected': False,
},
)
def get_autocomplete_html_tests(self):
return []
def get_widget_tests(self):
return (
{
'form_class': FormMock,
'fixture': 'x=%s-%s' % (
self.group_ctype.pk, self.bluesmen.pk),
'expected_valid': True,
'expected_data': self.bluesmen,
},
{
'form_class': FormMock,
'fixture': 'x=%s-%s' % (
self.group_ctype.pk, self.emos.pk),
'expected_valid': False,
},
{
'form_class': FormMock,
'fixture': 'x=12343-2',
'expected_valid': False,
},
{
'form_class': FormMock,
'fixture': 'x=%s-2' % ContentType.objects.get_for_model(
Permission).pk,
'expected_valid': False,
},
)
def test_default_search_fields(self):
class MyGeneric(autocomplete_light.AutocompleteGenericBase):
choices = [Group.objects.all()]
self.assertEqual(MyGeneric.search_fields, [('name',)])
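# --- Hedged helper sketch (editor addition, not part of the original file) ---
# The fixtures above encode a generic choice as "<contenttype_pk>-<object_pk>".
# A minimal helper that builds such a value for any model instance might look
# like this; the function name is an assumption.
def generic_choice_value(obj):
    """Return the 'ctype_pk-obj_pk' string used by GenericModelChoiceField."""
    return '%s-%s' % (ContentType.objects.get_for_model(obj).pk, obj.pk)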
| 30.60274 | 77 | 0.456356 |
| true | true |
1c4a1587700058c4fc116b57e305bc04604cf101 | 168 | py | Python | cemm/exceptions.py | klaasnicolaas/python-cemm | fa1d9787bdf4d41e1850015e4d9df833d0b97b07 | ["MIT"] | 1 | 2022-02-20T17:26:02.000Z | 2022-02-20T17:26:02.000Z | cemm/exceptions.py | klaasnicolaas/python-cemm | fa1d9787bdf4d41e1850015e4d9df833d0b97b07 | ["MIT"] | 109 | 2021-10-02T02:55:42.000Z | 2022-03-30T04:32:25.000Z | cemm/exceptions.py | klaasnicolaas/python-cemm | fa1d9787bdf4d41e1850015e4d9df833d0b97b07 | ["MIT"] | null | null | null |
"""Exceptions for CEMM."""
class CEMMError(Exception):
"""General CEMM exception."""
class CEMMConnectionError(CEMMError):
"""CEMM connection exception."""
| 16.8 | 37 | 0.690476 |
| true | true |
1c4a15ad041bcfb763454dea5efe21d533781450 | 2,980 | py | Python | tests/unit/test_common.py | HemangChothani/google-resumable-media-python | 7dc40de34533e4474240fc831b79cee2baa82c6e | ["Apache-2.0"] | 1 | 2019-07-30T14:24:08.000Z | 2019-07-30T14:24:08.000Z | tests/unit/test_common.py | HemangChothani/google-resumable-media-python | 7dc40de34533e4474240fc831b79cee2baa82c6e | ["Apache-2.0"] | 3 | 2019-07-07T17:55:56.000Z | 2019-08-05T01:13:27.000Z | tests/unit/test_common.py | HemangChothani/google-resumable-media-python | 7dc40de34533e4474240fc831b79cee2baa82c6e | ["Apache-2.0"] | null | null | null |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pytest
from google.resumable_media import common
class TestInvalidResponse(object):
def test_constructor(self):
response = mock.sentinel.response
error = common.InvalidResponse(
response, 1, u'a', [b'm'], True)
assert error.response is response
assert error.args == (1, u'a', [b'm'], True)
class TestRetryStrategy(object):
def test_constructor_defaults(self):
retry_strategy = common.RetryStrategy()
assert retry_strategy.max_sleep == common.MAX_SLEEP
assert (
retry_strategy.max_cumulative_retry == common.MAX_CUMULATIVE_RETRY)
assert retry_strategy.max_retries is None
def test_constructor_failure(self):
with pytest.raises(ValueError) as exc_info:
common.RetryStrategy(max_cumulative_retry=600.0, max_retries=12)
exc_info.match(common._SLEEP_RETRY_ERROR_MSG)
def test_constructor_explicit_bound_cumulative(self):
max_sleep = 10.0
max_cumulative_retry = 100.0
retry_strategy = common.RetryStrategy(
max_sleep=max_sleep, max_cumulative_retry=max_cumulative_retry)
assert retry_strategy.max_sleep == max_sleep
assert retry_strategy.max_cumulative_retry == max_cumulative_retry
assert retry_strategy.max_retries is None
def test_constructor_explicit_bound_retries(self):
max_sleep = 13.75
max_retries = 14
retry_strategy = common.RetryStrategy(
max_sleep=max_sleep, max_retries=max_retries)
assert retry_strategy.max_sleep == max_sleep
assert retry_strategy.max_cumulative_retry is None
assert retry_strategy.max_retries == max_retries
def test_retry_allowed_bound_cumulative(self):
retry_strategy = common.RetryStrategy(max_cumulative_retry=100.0)
assert retry_strategy.retry_allowed(50.0, 10)
assert retry_strategy.retry_allowed(99.0, 7)
assert retry_strategy.retry_allowed(100.0, 4)
assert not retry_strategy.retry_allowed(101.0, 11)
assert not retry_strategy.retry_allowed(200.0, 6)
def test_retry_allowed_bound_retries(self):
retry_strategy = common.RetryStrategy(max_retries=6)
assert retry_strategy.retry_allowed(1000.0, 5)
assert retry_strategy.retry_allowed(99.0, 6)
assert not retry_strategy.retry_allowed(625.5, 7)
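# --- Hedged sketch (editor addition, not part of the original file) ---
# How the RetryStrategy exercised above is typically consumed: keep retrying
# while retry_allowed(total_sleep, num_retries) permits. ``do_request`` and
# the fixed 1-second sleep are illustrative assumptions, not library
# behaviour.
import time

def retry_with_strategy(do_request, strategy=None):
    strategy = strategy or common.RetryStrategy()
    total_sleep, num_retries = 0.0, 0
    while True:
        try:
            return do_request()
        except Exception:
            num_retries += 1
            if not strategy.retry_allowed(total_sleep, num_retries):
                raise
            time.sleep(1.0)
            total_sleep += 1.0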
| 37.25 | 79 | 0.72349 |
| true | true |
1c4a160721a6fe7cb7d9b89d16df5a107e587737 | 1,182 | py | Python | ckanext/harvest/logic/auth/__init__.py | alphagov-mirror/ckanext-harvest | be4d134cf2e4d4548c67dc2f61b200948f0f74e0 | ["PostgreSQL"] | 86 | 2015-01-09T19:21:20.000Z | 2022-03-23T07:17:27.000Z | ckanext/harvest/logic/auth/__init__.py | alphagov-mirror/ckanext-harvest | be4d134cf2e4d4548c67dc2f61b200948f0f74e0 | ["PostgreSQL"] | 319 | 2015-01-13T13:40:08.000Z | 2022-03-24T12:13:42.000Z | ckanext/harvest/logic/auth/__init__.py | alphagov-mirror/ckanext-harvest | be4d134cf2e4d4548c67dc2f61b200948f0f74e0 | ["PostgreSQL"] | 154 | 2015-01-13T21:06:03.000Z | 2022-03-15T12:10:57.000Z |
from ckan.plugins import toolkit as pt
from ckanext.harvest import model as harvest_model
def user_is_sysadmin(context):
'''
Checks if the user defined in the context is a sysadmin
rtype: boolean
'''
model = context['model']
user = context['user']
user_obj = model.User.get(user)
if not user_obj:
        raise pt.ObjectNotFound('User {0} not found'.format(user))
return user_obj.sysadmin
def _get_object(context, data_dict, name, class_name):
'''
return the named item if in the data_dict, or get it from
model.class_name
'''
if name not in context:
id = data_dict.get('id', None)
obj = getattr(harvest_model, class_name).get(id)
if not obj:
raise pt.ObjectNotFound
else:
obj = context[name]
return obj
def get_source_object(context, data_dict={}):
return _get_object(context, data_dict, 'source', 'HarvestSource')
def get_job_object(context, data_dict={}):
return _get_object(context, data_dict, 'job', 'HarvestJob')
def get_obj_object(context, data_dict={}):
return _get_object(context, data_dict, 'obj', 'HarvestObject')
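# --- Hedged usage sketch (editor addition, not part of the original file) ---
# How a CKAN auth function might combine the helpers above; the function name
# and the success/msg dict shape follow CKAN's usual auth conventions and are
# assumptions, not taken from this module.
def harvest_source_update(context, data_dict):
    source = get_source_object(context, data_dict)
    if user_is_sysadmin(context):
        return {'success': True}
    return {'success': False,
            'msg': 'Only sysadmins may update harvest source %s' % source.id}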
| 26.266667 | 75 | 0.665821 |
| true | true |
1c4a16408d89c66b6f8bf1f9a92e53a8e91f5fe6 | 8,378 | py | Python | fabric/fabfile.py | jim8786453/kiln_share | 2d70c8863f7db18069d13cdea319cd113a2d0bbb | ["BSD-3-Clause"] | 1 | 2018-03-21T12:27:56.000Z | 2018-03-21T12:27:56.000Z | fabric/fabfile.py | jim8786453/kiln_share | 2d70c8863f7db18069d13cdea319cd113a2d0bbb | ["BSD-3-Clause"] | null | null | null | fabric/fabfile.py | jim8786453/kiln_share | 2d70c8863f7db18069d13cdea319cd113a2d0bbb | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
import os
from fabric.api import run, env, settings, cd, task, put, execute
from fabric.contrib.files import exists, upload_template
from fabric.operations import _prefix_commands, _prefix_env_vars, require, sudo, local as local_
env.use_ssh_config = True
LOCAL_HOST = os.environ.get('LOCAL_HOST')
LOCAL_USER = os.environ.get('LOCAL_USER')
LOCAL_PASSWORD = os.environ.get('LOCAL_PASSWORD')
LOCAL_SERVER_NAME = os.environ.get('LOCAL_SERVER_NAME')
LOCAL_JWT_SECRET = os.environ.get('LOCAL_JWT_SECRET')
WWW_HOST = os.environ.get('WWW_HOST')
WWW_USER = os.environ.get('WWW_USER')
WWW_PASSWORD = os.environ.get('WWW_PASSWORD')
WWW_SERVER_NAME = os.environ.get('WWW_SERVER_NAME')
WWW_JWT_SECRET = os.environ.get('WWW_JWT_SECRET')
STAGES = {
'local': {
'hosts': [LOCAL_HOST],
'user': LOCAL_USER,
'pasword': LOCAL_PASSWORD,
'server_name': LOCAL_SERVER_NAME,
'jwt_secret': LOCAL_JWT_SECRET
},
'www': {
'hosts': [WWW_HOST],
'user': WWW_USER,
'server_name': WWW_SERVER_NAME,
'jwt_secret': WWW_JWT_SECRET
}
}
def stage_set(stage_name='local'):
"""Utility function to set an environment up for Fabric.
"""
env.stage = stage_name
for option, value in STAGES[env.stage].items():
setattr(env, option, value)
def stage_require():
"""Ensure a valid stage is enabled.
"""
require('stage', provided_by=(
local,
www))
@task
def local():
"""Use the local environment.
"""
stage_set('local')
@task
def www():
"""Use the live environment.
"""
stage_set('www')
@task
def check_sudo():
"""Run a command that uses sudo.
"""
stage_require()
sudo("date")
@task
def install():
"""Install all kilnshare.co.uk webserver components.
"""
stage_require()
remove()
setup_dirs()
install_openresty()
install_lua()
install_nginxjwt()
install_modules()
install_swaggerui()
    install_gui()  # front-end build/deploy; no install_mithril task is defined in this fabfile
configure_firewall()
configure_certs()
configure_openresty()
restart_server()
@task
def remove():
stage_require()
sudo('rm -Rf /home/%s/deploy' % env.user)
sudo('rm -Rf /usr/local/openresty/')
@task
def setup_dirs():
stage_require()
run('mkdir -p /home/%s/deploy' % env.user)
run('mkdir -p /home/%s/deploy/api' % env.user)
run('mkdir -p /home/%s/deploy/bin' % env.user)
run('mkdir -p /home/%s/deploy/config' % env.user)
run('mkdir -p /home/%s/deploy/downloads' % env.user)
run('mkdir -p /home/%s/deploy/scripts' % env.user)
run('mkdir -p /home/%s/deploy/www' % env.user)
run('mkdir -p /home/%s/deploy/swagger-ui' % env.user)
@task
def install_openresty():
"""
"""
stage_require()
put('../webserver/scripts/install_openresty.sh',
'/home/%s/deploy/scripts/install_openresty.sh' % env.user,
mode=0755)
sudo('/home/%s/deploy/scripts/install_openresty.sh' % env.user)
@task
def install_lua():
"""Install and configure Lua and dependencies.
"""
stage_require()
template_dir = os.path.join(os.path.dirname(__file__), '../webserver')
upload_template(
'scripts/install_lua.sh',
'/home/%s/deploy/scripts/install_lua.sh' % env.user,
context=env,
use_jinja=True,
mode=0755,
backup=False,
template_dir=template_dir)
sudo('/home/%s/deploy/scripts/install_lua.sh' % env.user)
@task
def install_nginxjwt():
"""Install and configure Lua and dependencies.
"""
stage_require()
template_dir = os.path.join(os.path.dirname(__file__), '../webserver')
upload_template(
'scripts/install_nginxjwt.sh',
'/home/%s/deploy/scripts/install_nginxjwt.sh' % env.user,
context=env,
use_jinja=True,
mode=0755,
backup=False,
template_dir=template_dir)
sudo('/home/%s/deploy/scripts/install_nginxjwt.sh' % env.user)
@task
def install_modules():
"""
"""
stage_require()
put('../webserver/modules/kiln_share.lua', '/home/%s/deploy/bin/kiln_share.lua'
% env.user, use_sudo=True)
@task
def install_swaggerui():
"""
"""
stage_require()
put('../swagger-ui', '/home/%s/deploy/'
% env.user, use_sudo=True)
@task
def configure_firewall():
"""Configure Ubuntu firewall.
"""
stage_require()
sudo('ufw allow http')
sudo('ufw allow https')
@task
def configure_certs():
"""Create SSL certificates.
    - Uses Letsencrypt for all non-local environments.
"""
stage_require()
if not env.stage == 'local':
template_dir = os.path.join(os.path.dirname(__file__), '../webserver')
upload_template(
'templates/letsencrypt.sh',
'/home/%s/deploy/scripts/letsencrypt.sh' % env.user,
context=env,
use_jinja=True,
mode=0755,
backup=False,
template_dir = template_dir)
sudo('/home/%s/deploy/scripts/letsencrypt.sh' % env.user)
return
# local.kilnshare.co.uk does not have a DNS entry so we can't use
# Letsencrypt. Self sign instead.
sudo('mkdir -p /etc/letsencrypt/live/local.kilnshare.co.uk/')
sudo('cp /etc/ssl/certs/ssl-cert-snakeoil.pem /etc/letsencrypt/live/local.kilnshare.co.uk/fullchain.pem')
sudo('cp /etc/ssl/private/ssl-cert-snakeoil.key /etc/letsencrypt/live/local.kilnshare.co.uk/privkey.pem')
sudo('openssl dhparam -out ~/deploy/dhparams.pem 2048')
@task
def configure_openresty():
"""Upload Openresty configuration files.
- Make logging directories
- Configure systemctl
"""
stage_require()
sudo('mkdir -p /var/log/openresty')
sudo('mkdir -p /usr/local/openresty/nginx/sites')
template_dir = os.path.join(os.path.dirname(__file__), '../webserver')
upload_template(
'templates/openresty.service',
'/etc/systemd/system/openresty.service',
context=env,
use_jinja=True,
use_sudo=True,
backup=False,
template_dir=template_dir)
upload_template(
'templates/nginx.conf',
'/usr/local/openresty/nginx/conf/nginx.conf',
context=env,
use_jinja=True,
use_sudo=True,
backup=False,
template_dir=template_dir)
upload_template(
'templates/default.conf',
'/usr/local/openresty/nginx/sites/default.conf',
context=env,
use_jinja=True,
use_sudo=True,
backup=False,
template_dir=template_dir)
sudo('sudo systemctl daemon-reload')
sudo('sudo systemctl enable openresty')
@task
def start_server():
"""Start Openresty webserver.
"""
stage_require()
sudo('systemctl start openresty')
@task
def stop_server():
"""Stop Openresty webserver.
"""
stage_require()
sudo('systemctl stop openresty')
@task
def restart_server():
"""Restart Openresty webserver.
"""
stage_require()
sudo('systemctl restart openresty')
@task
def start_mongo():
"""Start Mongodb
"""
stage_require()
sudo('service mongod restart')
@task
def restart_mongo():
"""Restart Mongodb
"""
stage_require()
sudo('service mongod restart')
@task
def install_api():
"""Create a new virtualenv and install the Eve app.
"""
sudo('pkill -9 gunicorn')
local_('cd ../api && python setup.py sdist --formats=gztar', capture=False)
dist = local_('cd ../api && python setup.py --fullname', capture=True).strip()
filename = '%s.tar.gz' % dist
put('../api/dist/%s' % filename, '/tmp/%s' % filename)
sudo('virtualenv /home/%s/deploy/api' % env.user)
sudo('/home/%s/deploy/api/bin/pip install gunicorn' % (env.user))
sudo('/home/%s/deploy/api/bin/pip install /tmp/%s' % (env.user, filename))
@task
def start_api():
"""Run the Python api using Gunicorn.
"""
sudo('. /home/%s/deploy/api/bin/activate && gunicorn --daemon -b 0.0.0.0:8080 kiln_share:app' % (env.user))
@task
def stop_api():
"""Run the Python api using Gunicorn.
"""
sudo('pkill -9 gunicorn')
@task
def install_gui():
"""Build using node and copy the result.
"""
stage_require()
local_('cd ../gui && npm run build')
put('../gui/dist/*', '/home/%s/deploy/www/'
% env.user, use_sudo=True)
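# --- Hedged usage note (editor addition, not part of the original file) ---
# With Fabric 1.x, a stage task is chained before an action task on the CLI so
# that stage_require() passes, e.g.:
#
#   fab local install          # full webserver install against LOCAL_HOST
#   fab www install_api start_api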
| 23.337047 | 111 | 0.659586 |
| true | true |
1c4a165fe953944a991af628f8e92d4c59b8d672 | 1,549 | py | Python | ml3d/torch/utils/roipool3d/roipool3d_utils.py | krshrimali/Open3D-ML | e6352ee84d38a4b90c71dd7f376f5570fe849537 | ["MIT"] | 447 | 2020-10-14T23:16:41.000Z | 2021-07-27T06:57:45.000Z | ml3d/torch/utils/roipool3d/roipool3d_utils.py | krshrimali/Open3D-ML | e6352ee84d38a4b90c71dd7f376f5570fe849537 | ["MIT"] | 179 | 2021-07-27T15:32:33.000Z | 2022-03-30T14:32:53.000Z | ml3d/torch/utils/roipool3d/roipool3d_utils.py | krshrimali/Open3D-ML | e6352ee84d38a4b90c71dd7f376f5570fe849537 | ["MIT"] | 92 | 2021-07-28T13:50:52.000Z | 2022-03-30T09:24:33.000Z |
import torch
import open3d
if open3d.core.cuda.device_count() > 0:
from open3d.ml.torch.ops import roi_pool
import numpy as np
def enlarge_box3d(boxes3d, extra_width):
"""Enlarge 3D box.
Args:
boxes3d: (N, 7) [x, y, z, h, w, l, ry]
extra_width: extra width
"""
if isinstance(boxes3d, np.ndarray):
large_boxes3d = boxes3d.copy()
else:
large_boxes3d = boxes3d.clone()
large_boxes3d[:, 3:6] += extra_width * 2
large_boxes3d[:, 1] += extra_width
return large_boxes3d
def roipool3d_gpu(pts,
pts_feature,
boxes3d,
pool_extra_width,
sampled_pt_num=512):
"""Roipool3D GPU.
Args:
pts: (B, N, 3)
pts_feature: (B, N, C)
boxes3d: (B, M, 7)
pool_extra_width: float
sampled_pt_num: int
Returns:
pooled_features: (B, M, 512, 3 + C)
pooled_empty_flag: (B, M)
"""
if not open3d.core.cuda.device_count() > 0:
raise NotImplementedError
batch_size = pts.shape[0]
pooled_boxes3d = enlarge_box3d(boxes3d.view(-1, 7),
pool_extra_width).view(batch_size, -1, 7)
pooled_features, pooled_empty_flag = roi_pool(pts.contiguous(),
pooled_boxes3d.contiguous(),
pts_feature.contiguous(),
sampled_pt_num)
return pooled_features, pooled_empty_flag
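# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Shapes follow the docstring above; requires a CUDA-enabled Open3D build.
if __name__ == '__main__':
    B, N, C, M = 2, 1024, 16, 8
    pts = torch.rand(B, N, 3).cuda()
    pts_feature = torch.rand(B, N, C).cuda()
    boxes3d = torch.rand(B, M, 7).cuda()
    features, empty_flag = roipool3d_gpu(pts, pts_feature, boxes3d,
                                         pool_extra_width=1.0)
    print(features.shape, empty_flag.shape)  # (B, M, 512, 3 + C), (B, M)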
| 28.163636 | 78 | 0.542285 |
| true | true |
1c4a18579c83b46e8f6e2e84863af984b6e3501c | 2,335 | py | Python | apricotlib/raptorx_secstr.py | malvikasharan/APRICOT | 529afadfb99fa8249fa4ecfb07253eab892c7a8e | ["0BSD"] | 5 | 2016-05-25T12:30:02.000Z | 2021-04-11T14:55:32.000Z | apricotlib/raptorx_secstr.py | malvikasharan/APRICOT | 529afadfb99fa8249fa4ecfb07253eab892c7a8e | ["0BSD"] | 1 | 2017-05-20T07:19:25.000Z | 2018-02-05T22:14:12.000Z | apricotlib/raptorx_secstr.py | malvikasharan/APRICOT | 529afadfb99fa8249fa4ecfb07253eab892c7a8e | ["0BSD"] | 6 | 2016-05-18T07:08:49.000Z | 2021-02-20T14:28:55.000Z |
#!/usr/bin/env python
# Description = Predict 3 or 8 state secondary structure using RaptorX
import os
import subprocess
class RaptorxSecstrAnalysis(object):
def __init__(self, selected_proteins, raptorx_path,
fasta_path, outpath):
self._selected_proteins = selected_proteins
self._raptorx_path = raptorx_path
self._fasta_path = fasta_path
self._outpath = outpath
self._selected_protein_set = set()
def streamline_raptorx_secstr_analysis(self):
'''To call from apricot'''
self.parse_selected_data()
self.run_raptorx_analysis()
self.create_job_completion_file()
def parse_selected_data(self):
'''Parses selected data for uid'''
with open(self._selected_proteins, 'r') as in_fh:
for entry in in_fh:
if not entry.startswith('Entry'):
self._selected_protein_set.add(entry.split('\t')[0])
return self._selected_protein_set
def run_raptorx_analysis(self):
'''Runs RaptorX on the selected uids for 8-state secondary structure
prediction
'''
for files in os.listdir(self._fasta_path):
if files.split('.')[0] in self._selected_protein_set:
print("RaptorX 8-state secondary structure analysis for %s" %
files)
subprocess.Popen(
["perl %s %s/%s" %
(self._raptorx_path, self._fasta_path, files)],
shell=True).wait()
subprocess.Popen(["mv *.ss* %s" % self._outpath],
shell=True).wait()
subprocess.Popen(["mv *.horiz %s" % self._outpath],
shell=True).wait()
subprocess.Popen(["rm -rf tmp*.%s" % files.split('.')[0]],
shell=True).wait()
def create_job_completion_file(self):
with open(self._outpath+'/raptorx_analysis.txt', 'w') as out_fh:
out_fh.write("Secondary structures for the selected proteins are "
"generated by RaptorX.\n")
out_fh.write("The files generated by the analysis:.\n")
out_fh.write('\n'.join(os.listdir(self._outpath)))
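# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Driving the class above end-to-end; all four paths are assumptions.
if __name__ == '__main__':
    analysis = RaptorxSecstrAnalysis(
        selected_proteins='selected_data.tsv',
        raptorx_path='/opt/raptorx/buildFeature.pl',
        fasta_path='fasta_files',
        outpath='raptorx_output')
    analysis.streamline_raptorx_secstr_analysis()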
| 40.258621 | 78 | 0.568737 |
| true | true |
1c4a18e8aea20d95df3d8fe117a47346ed852aa9 | 357 | py | Python | tutorial/gallery.py | bricakeld/dash-docs | a79f52ac88c6ebff10a5b2e0af43e89410372dd4 | ["MIT"] | null | null | null | tutorial/gallery.py | bricakeld/dash-docs | a79f52ac88c6ebff10a5b2e0af43e89410372dd4 | ["MIT"] | 3 | 2021-03-31T19:16:27.000Z | 2021-12-13T20:27:16.000Z | tutorial/gallery.py | bricakeld/dash-docs | a79f52ac88c6ebff10a5b2e0af43e89410372dd4 | ["MIT"] | 1 | 2022-03-18T09:41:34.000Z | 2022-03-18T09:41:34.000Z |
# -*- coding: utf-8 -*-
import dash_html_components as html
import dash_core_components as dcc
from textwrap import dedent
layout = html.Div(className='gallery', children=[
dcc.Markdown(dedent('''
## The Dash App Gallery has moved!
It is now at [https://dash-gallery.plotly.host/Portal/](https://dash-gallery.plotly.host/Portal/)
'''))
])
| 27.461538 | 101 | 0.697479 |
| true | true |
1c4a1ab1f277c42ed8aa39fecf9d2cbfe95ad12c | 1,883 | py | Python | examples/ramps.py | plecto/motorway | ce42b77a9a2d48cf1a9fd2f3bc405accb98030df | ["Apache-2.0"] | 166 | 2015-01-19T05:39:24.000Z | 2022-02-26T15:09:34.000Z | examples/ramps.py | plecto/motorway | ce42b77a9a2d48cf1a9fd2f3bc405accb98030df | ["Apache-2.0"] | 17 | 2016-07-29T10:23:24.000Z | 2022-01-04T10:09:01.000Z | examples/ramps.py | plecto/motorway | ce42b77a9a2d48cf1a9fd2f3bc405accb98030df | ["Apache-2.0"] | 21 | 2016-09-08T12:26:50.000Z | 2021-07-08T09:54:16.000Z |
import time
import uuid
from motorway.contrib.amazon_kinesis.ramps import KinesisRamp
from motorway.contrib.amazon_kinesis.intersections import KinesisInsertIntersection
from motorway.contrib.amazon_sqs.ramps import SQSRamp
from motorway.messages import Message
from motorway.ramp import Ramp
import random
class WordRamp(Ramp):
sentences = [
"Oak is strong and also gives shade.",
"Cats and dogs each hate the other.",
"The pipe began to rust while new.",
"Open the crate but don't break the glass.",
"Add the sum to the product of these three.",
"Thieves who rob friends deserve jail.",
"The ripe taste of cheese improves with age.",
"Act on these orders with great speed.",
"The hog crawled under the high fence.",
"Move the vat over the hot fire.",
]
def __init__(self, *args, **kwargs):
super(WordRamp, self).__init__(*args, **kwargs)
self.limit = 10000
self.progress = 1
def next(self):
# yield Message(uuid.uuid4().int, self.sentences[random.randint(0, len(self.sentences) -1)])
if self.progress <= self.limit:
self.progress += 1
# time.sleep(10)
sentence = self.sentences[random.randint(0, len(self.sentences) -1)]
yield Message(uuid.uuid4().int, sentence, grouping_value=sentence)
else:
time.sleep(1)
def success(self, _id):
pass
#print "WordRamp %s was successful" % _id
def failed(self, _id):
print("WordRamp %s has failed" % _id)
def should_run(self):
return True
class ExampleSQSRamp(SQSRamp):
queue_name = "tutorial_motorway"
class ExampleKinesisRamp(KinesisRamp):
stream_name = "data-pipeline-test"
class ExampleKinesisIntersection(KinesisInsertIntersection):
stream_name = "data-pipeline-test"
| 30.868852 | 100 | 0.659586 |
| true | true |
1c4a1e994f54a55d96dde9d569cf6f8d99b2a79c | 13,569 | py | Python | src/main/python/ui/batch.py | bmiller/beqdesigner | 36d0c780507a564536038e2c9fc3b03b75dedaf4 | ["MIT"] | 16 | 2019-04-12T00:04:56.000Z | 2022-03-15T14:26:56.000Z | src/main/python/ui/batch.py | bmiller/beqdesigner | 36d0c780507a564536038e2c9fc3b03b75dedaf4 | ["MIT"] | 400 | 2018-08-27T10:04:00.000Z | 2022-03-15T21:32:33.000Z | src/main/python/ui/batch.py | bmiller/beqdesigner | 36d0c780507a564536038e2c9fc3b03b75dedaf4 | ["MIT"] | 6 | 2018-09-19T21:02:27.000Z | 2020-10-18T04:11:01.000Z |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'batch.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_batchExtractDialog(object):
def setupUi(self, batchExtractDialog):
batchExtractDialog.setObjectName("batchExtractDialog")
batchExtractDialog.resize(1727, 925)
self.verticalLayout = QtWidgets.QVBoxLayout(batchExtractDialog)
self.verticalLayout.setObjectName("verticalLayout")
self.controlFrame = QtWidgets.QFrame(batchExtractDialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.controlFrame.sizePolicy().hasHeightForWidth())
self.controlFrame.setSizePolicy(sizePolicy)
self.controlFrame.setFrameShape(QtWidgets.QFrame.Panel)
self.controlFrame.setFrameShadow(QtWidgets.QFrame.Sunken)
self.controlFrame.setObjectName("controlFrame")
self.gridLayout = QtWidgets.QGridLayout(self.controlFrame)
self.gridLayout.setObjectName("gridLayout")
self.controlsLayout = QtWidgets.QGridLayout()
self.controlsLayout.setObjectName("controlsLayout")
self.threads = QtWidgets.QSpinBox(self.controlFrame)
self.threads.setMinimum(1)
self.threads.setMaximum(64)
self.threads.setProperty("value", 1)
self.threads.setObjectName("threads")
self.controlsLayout.addWidget(self.threads, 3, 1, 1, 1)
self.searchButton = QtWidgets.QPushButton(self.controlFrame)
self.searchButton.setEnabled(False)
self.searchButton.setObjectName("searchButton")
self.controlsLayout.addWidget(self.searchButton, 5, 1, 1, 1)
self.outputDirLabel = QtWidgets.QLabel(self.controlFrame)
self.outputDirLabel.setObjectName("outputDirLabel")
self.controlsLayout.addWidget(self.outputDirLabel, 1, 0, 1, 1)
self.threadsLabel = QtWidgets.QLabel(self.controlFrame)
self.threadsLabel.setObjectName("threadsLabel")
self.controlsLayout.addWidget(self.threadsLabel, 3, 0, 1, 1)
self.filterLabel = QtWidgets.QLabel(self.controlFrame)
self.filterLabel.setToolTip("")
self.filterLabel.setObjectName("filterLabel")
self.controlsLayout.addWidget(self.filterLabel, 0, 0, 1, 1)
self.extractButton = QtWidgets.QPushButton(self.controlFrame)
self.extractButton.setEnabled(False)
self.extractButton.setObjectName("extractButton")
self.controlsLayout.addWidget(self.extractButton, 5, 2, 1, 1)
self.resetButton = QtWidgets.QPushButton(self.controlFrame)
self.resetButton.setEnabled(False)
self.resetButton.setObjectName("resetButton")
self.controlsLayout.addWidget(self.resetButton, 5, 3, 1, 1)
self.outputDirPicker = QtWidgets.QToolButton(self.controlFrame)
self.outputDirPicker.setObjectName("outputDirPicker")
self.controlsLayout.addWidget(self.outputDirPicker, 1, 4, 1, 1)
self.outputDir = QtWidgets.QLineEdit(self.controlFrame)
self.outputDir.setEnabled(False)
self.outputDir.setObjectName("outputDir")
self.controlsLayout.addWidget(self.outputDir, 1, 1, 1, 3)
self.filter = QtWidgets.QLineEdit(self.controlFrame)
font = QtGui.QFont()
font.setFamily("Consolas")
self.filter.setFont(font)
self.filter.setText("")
self.filter.setObjectName("filter")
self.controlsLayout.addWidget(self.filter, 0, 1, 1, 3)
self.monoMix = QtWidgets.QCheckBox(self.controlFrame)
self.monoMix.setChecked(True)
self.monoMix.setObjectName("monoMix")
self.controlsLayout.addWidget(self.monoMix, 5, 0, 1, 1)
self.controlsLayout.setColumnStretch(1, 1)
self.controlsLayout.setColumnStretch(2, 1)
self.controlsLayout.setColumnStretch(3, 1)
self.gridLayout.addLayout(self.controlsLayout, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.controlFrame)
self.resultsFrame = QtWidgets.QFrame(batchExtractDialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.resultsFrame.sizePolicy().hasHeightForWidth())
self.resultsFrame.setSizePolicy(sizePolicy)
self.resultsFrame.setFrameShape(QtWidgets.QFrame.Box)
self.resultsFrame.setFrameShadow(QtWidgets.QFrame.Sunken)
self.resultsFrame.setObjectName("resultsFrame")
self.gridLayout_2 = QtWidgets.QGridLayout(self.resultsFrame)
self.gridLayout_2.setObjectName("gridLayout_2")
self.resultsTitle = QtWidgets.QLabel(self.resultsFrame)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.resultsTitle.setFont(font)
self.resultsTitle.setFrameShape(QtWidgets.QFrame.Box)
self.resultsTitle.setFrameShadow(QtWidgets.QFrame.Sunken)
self.resultsTitle.setAlignment(QtCore.Qt.AlignCenter)
self.resultsTitle.setObjectName("resultsTitle")
self.gridLayout_2.addWidget(self.resultsTitle, 0, 0, 1, 1)
self.resultsScrollArea = QtWidgets.QScrollArea(self.resultsFrame)
self.resultsScrollArea.setWidgetResizable(True)
self.resultsScrollArea.setObjectName("resultsScrollArea")
self.resultsScrollAreaContents = QtWidgets.QWidget()
self.resultsScrollAreaContents.setGeometry(QtCore.QRect(0, 0, 1669, 660))
self.resultsScrollAreaContents.setObjectName("resultsScrollAreaContents")
self.resultsScrollLayout = QtWidgets.QGridLayout(self.resultsScrollAreaContents)
self.resultsScrollLayout.setObjectName("resultsScrollLayout")
self.resultsLayout = QtWidgets.QGridLayout()
self.resultsLayout.setObjectName("resultsLayout")
self.statusHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents)
font = QtGui.QFont()
font.setUnderline(True)
self.statusHeaderLabel.setFont(font)
self.statusHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.statusHeaderLabel.setObjectName("statusHeaderLabel")
self.resultsLayout.addWidget(self.statusHeaderLabel, 0, 0, 1, 1)
self.probeHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents)
font = QtGui.QFont()
font.setItalic(True)
font.setUnderline(True)
self.probeHeaderLabel.setFont(font)
self.probeHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.probeHeaderLabel.setObjectName("probeHeaderLabel")
self.resultsLayout.addWidget(self.probeHeaderLabel, 0, 2, 1, 1)
self.streamHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents)
font = QtGui.QFont()
font.setBold(False)
font.setItalic(True)
font.setUnderline(True)
font.setWeight(50)
self.streamHeaderLabel.setFont(font)
self.streamHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.streamHeaderLabel.setObjectName("streamHeaderLabel")
self.resultsLayout.addWidget(self.streamHeaderLabel, 0, 3, 1, 1)
self.inputFileHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents)
font = QtGui.QFont()
font.setBold(False)
font.setItalic(True)
font.setUnderline(True)
font.setWeight(50)
self.inputFileHeaderLabel.setFont(font)
self.inputFileHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.inputFileHeaderLabel.setObjectName("inputFileHeaderLabel")
self.resultsLayout.addWidget(self.inputFileHeaderLabel, 0, 1, 1, 1)
self.channelsHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents)
font = QtGui.QFont()
font.setBold(False)
font.setItalic(True)
font.setUnderline(True)
font.setWeight(50)
self.channelsHeaderLabel.setFont(font)
self.channelsHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.channelsHeaderLabel.setObjectName("channelsHeaderLabel")
self.resultsLayout.addWidget(self.channelsHeaderLabel, 0, 4, 1, 1)
self.outputFileHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents)
font = QtGui.QFont()
font.setBold(False)
font.setItalic(True)
font.setUnderline(True)
font.setWeight(50)
self.outputFileHeaderLabel.setFont(font)
self.outputFileHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.outputFileHeaderLabel.setObjectName("outputFileHeaderLabel")
self.resultsLayout.addWidget(self.outputFileHeaderLabel, 0, 6, 1, 1)
self.progressHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents)
font = QtGui.QFont()
font.setBold(False)
font.setItalic(True)
font.setUnderline(True)
font.setWeight(50)
self.progressHeaderLabel.setFont(font)
self.progressHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.progressHeaderLabel.setObjectName("progressHeaderLabel")
self.resultsLayout.addWidget(self.progressHeaderLabel, 0, 8, 1, 1)
self.lfeHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents)
font = QtGui.QFont()
font.setBold(False)
font.setItalic(True)
font.setUnderline(True)
font.setWeight(50)
self.lfeHeaderLabel.setFont(font)
self.lfeHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.lfeHeaderLabel.setObjectName("lfeHeaderLabel")
self.resultsLayout.addWidget(self.lfeHeaderLabel, 0, 5, 1, 1)
self.ffmpegCliLabel = QtWidgets.QLabel(self.resultsScrollAreaContents)
font = QtGui.QFont()
font.setItalic(True)
font.setUnderline(True)
self.ffmpegCliLabel.setFont(font)
self.ffmpegCliLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.ffmpegCliLabel.setObjectName("ffmpegCliLabel")
self.resultsLayout.addWidget(self.ffmpegCliLabel, 0, 7, 1, 1)
self.resultsLayout.setColumnStretch(1, 1)
self.resultsLayout.setColumnStretch(3, 2)
self.resultsLayout.setColumnStretch(6, 1)
self.resultsLayout.setColumnStretch(8, 1)
self.resultsScrollLayout.addLayout(self.resultsLayout, 0, 0, 1, 1)
self.resultsScrollArea.setWidget(self.resultsScrollAreaContents)
self.gridLayout_2.addWidget(self.resultsScrollArea, 1, 0, 1, 1)
self.verticalLayout.addWidget(self.resultsFrame)
self.retranslateUi(batchExtractDialog)
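        # Connect widget signals to slot methods that the dialog passed in as
        # batchExtractDialog is expected to provide (search, extract, etc.).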
self.searchButton.clicked.connect(batchExtractDialog.search)
self.extractButton.clicked.connect(batchExtractDialog.extract)
self.outputDirPicker.clicked.connect(batchExtractDialog.select_output)
self.filter.textChanged['QString'].connect(batchExtractDialog.enable_search)
self.resetButton.clicked.connect(batchExtractDialog.reset_batch)
self.threads.valueChanged['int'].connect(batchExtractDialog.change_pool_size)
QtCore.QMetaObject.connectSlotsByName(batchExtractDialog)
def retranslateUi(self, batchExtractDialog):
_translate = QtCore.QCoreApplication.translate
batchExtractDialog.setWindowTitle(_translate("batchExtractDialog", "Extract Audio"))
self.searchButton.setText(_translate("batchExtractDialog", "Search"))
self.outputDirLabel.setText(_translate("batchExtractDialog", "Output Directory"))
self.threadsLabel.setText(_translate("batchExtractDialog", "Threads"))
self.filterLabel.setText(_translate("batchExtractDialog", "Search Filter"))
self.extractButton.setText(_translate("batchExtractDialog", "Extract"))
self.resetButton.setText(_translate("batchExtractDialog", "Reset"))
self.outputDirPicker.setText(_translate("batchExtractDialog", "..."))
self.filter.setPlaceholderText(_translate("batchExtractDialog", "Enter 1 or more search filters, e.g. w:/films/*.mkv;y:/videos/**/*.m2ts"))
self.monoMix.setText(_translate("batchExtractDialog", "Mix to Mono?"))
self.resultsTitle.setText(_translate("batchExtractDialog", "Results"))
self.statusHeaderLabel.setText(_translate("batchExtractDialog", "Status"))
self.probeHeaderLabel.setText(_translate("batchExtractDialog", "Probe"))
self.streamHeaderLabel.setText(_translate("batchExtractDialog", "Stream"))
self.inputFileHeaderLabel.setText(_translate("batchExtractDialog", "Input File"))
self.channelsHeaderLabel.setText(_translate("batchExtractDialog", "Channels"))
self.outputFileHeaderLabel.setText(_translate("batchExtractDialog", "Output File"))
self.progressHeaderLabel.setText(_translate("batchExtractDialog", "Progress"))
self.lfeHeaderLabel.setText(_translate("batchExtractDialog", "LFE"))
self.ffmpegCliLabel.setText(_translate("batchExtractDialog", "ffmpeg"))
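# Usage sketch (an assumption, not part of the generated file): pyuic5 output
# such as Ui_batchExtractDialog is normally mixed into a QDialog subclass that
# implements the slots wired up in setupUi above:
#
#   class BatchExtractDialog(QtWidgets.QDialog, Ui_batchExtractDialog):
#       def __init__(self, parent=None):
#           super().__init__(parent)
#           self.setupUi(self)  # builds the widgets and connects the signals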
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
## Arithmetic Operators
TensorFlow provides several operations that you can use to add basic arithmetic
operators to your graph.
@@add
@@sub
@@mul
@@multiply
@@scalar_mul
@@div
@@divide
@@truediv
@@floordiv
@@mod
@@cross
## Basic Math Functions
TensorFlow provides several operations that you can use to add basic
mathematical functions to your graph.
@@add_n
@@abs
@@neg
@@negative
@@sign
@@inv
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@log
@@log1p
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
@@lbeta
@@tan
@@acos
@@asin
@@atan
@@lgamma
@@digamma
@@erf
@@erfc
@@squared_difference
@@igamma
@@igammac
@@zeta
@@polygamma
@@betainc
## Matrix Math Functions
TensorFlow provides several operations that you can use to add linear algebra
functions on matrices to your graph.
@@diag
@@diag_part
@@trace
@@transpose
@@eye
@@matrix_diag
@@matrix_diag_part
@@matrix_band_part
@@matrix_set_diag
@@matrix_transpose
@@matmul
@@batch_matmul
@@matrix_determinant
@@matrix_inverse
@@cholesky
@@cholesky_solve
@@matrix_solve
@@matrix_triangular_solve
@@matrix_solve_ls
@@self_adjoint_eig
@@self_adjoint_eigvals
@@svd
## Complex Number Functions
TensorFlow provides several operations that you can use to add complex number
functions to your graph.
@@complex
@@complex_abs
@@conj
@@imag
@@real
## Fourier Transform Functions
TensorFlow provides several operations that you can use to add discrete
Fourier transform functions to your graph.
@@fft
@@ifft
@@fft2d
@@ifft2d
@@fft3d
@@ifft3d
## Reduction
TensorFlow provides several operations that you can use to perform
common math computations that reduce various dimensions of a tensor.
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@reduce_logsumexp
@@count_nonzero
@@accumulate_n
@@einsum
## Scan
TensorFlow provides several operations that you can use to perform scans
(running totals) across one axis of a tensor.
@@cumsum
@@cumprod
## Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.segment_sum(c, tf.constant([0, 0, 1]))
  ==> [[0 0 0 0]
       [5 6 7 8]]
```
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@sparse_segment_sum
@@sparse_segment_mean
@@sparse_segment_sqrt_n
## Sequence Comparison and Indexing
TensorFlow provides several operations that you can use to add sequence
comparison and index extraction to your graph. You can use these operations to
determine sequence differences and determine the indexes of specific values in
a tensor.
@@argmin
@@argmax
@@setdiff1d
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
# Aliases for some automatically-generated names.
argmax = gen_math_ops.arg_max
argmin = gen_math_ops.arg_min
linspace = gen_math_ops.lin_space
# pylint: disable=anomalous-backslash-in-string,protected-access
def abs(x, name=None):
"""Computes the absolute value of a tensor.
Given a tensor of real numbers `x`, this operation returns a tensor
containing the absolute value of each element in `x`. For example, if x is
an input element and y is an output element, this operation computes
\\\\(y = |x|\\\\).
See [`tf.complex_abs()`](#tf_complex_abs) to compute the absolute value of a complex
number.
Args:
    x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`,
      `int64`, `complex64`, or `complex128`.
name: A name for the operation (optional).
Returns:
    A `Tensor` or `SparseTensor` the same size as `x` with absolute values.
    For `complex64`/`complex128` input, the values have the corresponding real
    dtype (`float32`/`float64`), as with `tf.complex_abs()`.
"""
with ops.name_scope(name, "Abs", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
if x.values.dtype in (dtypes.complex64, dtypes.complex128):
x_abs = gen_math_ops.complex_abs(x.values,
Tout=x.values.dtype.real_dtype, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_abs, shape=x.shape)
x_abs = gen_math_ops._abs(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_abs, shape=x.shape)
else:
x = ops.convert_to_tensor(x, name="x")
if x.dtype in (dtypes.complex64, dtypes.complex128):
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
return gen_math_ops._abs(x, name=name)
def divide(x, y, name=None):
"""Computes Python style division of `x` by `y`."""
with ops.name_scope(name, "Divide", [x]) as name:
return x / y
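# A minimal sketch (names from this module; values are illustrative): with the
# __future__ division import above, "/" on integer tensors maps to truediv, so
#   a = constant_op.constant([6, 4], dtype=dtypes.int32)
#   b = constant_op.constant([4, 8], dtype=dtypes.int32)
#   divide(a, b)   # float64 tensor: [1.5, 0.5]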
# Make Python Aliases
multiply = gen_math_ops.mul
subtract = gen_math_ops.sub
negative = gen_math_ops.neg
def neg(x, name=None):
"""Computes numerical negative value element-wise.
I.e., \\(y = -x\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Neg", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_neg = gen_math_ops.neg(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_neg, shape=x.shape)
else:
return gen_math_ops.neg(x, name=name)
def sign(x, name=None):
"""Returns an element-wise indication of the sign of a number.
`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Sign", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_sign = gen_math_ops.sign(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_sign, shape=x.shape)
else:
return gen_math_ops.sign(x, name=name)
def square(x, name=None):
"""Computes square of x element-wise.
I.e., \\(y = x * x = x^2\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Square", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_square = gen_math_ops.square(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_square, shape=x.shape)
else:
return gen_math_ops.square(x, name=name)
def sqrt(x, name=None):
"""Computes square root of x element-wise.
I.e., \\(y = \sqrt{x} = x^{1/2}\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Sqrt", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_sqrt = gen_math_ops.sqrt(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_sqrt, shape=x.shape)
else:
return gen_math_ops.sqrt(x, name=name)
def erf(x, name=None):
"""Computes the Gauss error function of `x` element-wise.
Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Erf", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_erf = gen_math_ops.erf(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_erf, shape=x.shape)
else:
return gen_math_ops.erf(x, name=name)
def complex_abs(x, name=None):
r"""Computes the complex absolute value of a tensor.
Given a tensor `x` of complex numbers, this operation returns a tensor of type
`float32` or `float64` that is the absolute value of each element in `x`. All
elements in `x` must be complex numbers of the form \\(a + bj\\). The
absolute value is computed as \\( \sqrt{a^2 + b^2}\\).
For example:
```
# tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
tf.complex_abs(x) ==> [5.25594902, 6.60492229]
```
Args:
x: A `Tensor` of type `complex64` or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
def scalar_mul(scalar, x):
"""Multiplies a scalar times a `Tensor` or `IndexedSlices` object.
Intended for use in gradient code which might deal with `IndexedSlices`
objects, which are easy to multiply by a scalar but more expensive to
multiply with arbitrary tensors.
Args:
scalar: A 0-D scalar `Tensor`. Must have known shape.
x: A `Tensor` or `IndexedSlices` to be scaled.
Returns:
`scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.
Raises:
    ValueError: if `scalar` is not a 0-D `Tensor`.
"""
scalar = ops.convert_to_tensor(scalar, dtype=x.dtype.base_dtype,
name="scalar")
shape = scalar.get_shape()
if shape.ndims == 0:
if isinstance(x, ops.IndexedSlices):
return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)
else:
return scalar * x
else:
raise ValueError("Only scalar multiply works, got shape %s" % shape)
def pow(x, y, name=None):
"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for
corresponding elements in `x` and `y`. For example:
```
# tensor 'x' is [[2, 2], [3, 3]]
# tensor 'y' is [[8, 16], [2, 3]]
tf.pow(x, y) ==> [[256, 65536], [9, 27]]
```
Args:
x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
or `complex128`.
y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`.
"""
with ops.name_scope(name, "Pow", [x]) as name:
return gen_math_ops._pow(x, y, name=name)
def complex(real, imag, name=None):
"""Converts two real numbers to a complex number.
Given a tensor `real` representing the real part of a complex number, and a
tensor `imag` representing the imaginary part of a complex number, this
operation returns complex numbers elementwise of the form \\(a + bj\\), where
*a* represents the `real` part and *b* represents the `imag` part.
The input tensors `real` and `imag` must have the same shape.
For example:
```
# tensor 'real' is [2.25, 3.25]
# tensor `imag` is [4.75, 5.75]
tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
```
Args:
real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
imag: A `Tensor`. Must have the same type as `real`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `complex64` or `complex128`.
"""
real = ops.convert_to_tensor(real, name="real")
imag = ops.convert_to_tensor(imag, name="imag")
with ops.name_scope(name, "Complex", [real, imag]) as name:
input_types = (real.dtype, imag.dtype)
if input_types == (dtypes.float64, dtypes.float64):
Tout = dtypes.complex128
elif input_types == (dtypes.float32, dtypes.float32):
Tout = dtypes.complex64
else:
raise TypeError("real and imag have incorrect types: "
"{} {}".format(real.dtype.name, imag.dtype.name))
return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
def real(input, name=None):
"""Returns the real part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float32` or `float64` that is the real part of each element in `input`.
All elements in `input` must be complex numbers of the form \\(a + bj\\),
where *a* is the real part returned by this operation and *b* is the
imaginary part.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.real(input) ==> [-2.25, 3.25]
```
If `input` is already real, it is returned unchanged.
Args:
input: A `Tensor`. Must have numeric type.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Real", [input]) as name:
real_dtype = input.dtype.real_dtype
if input.dtype.base_dtype == real_dtype:
return input
return gen_math_ops.real(input, Tout=real_dtype, name=name)
def imag(input, name=None):
"""Returns the imaginary part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float32` or `float64` that is the imaginary part of each element in
`input`. All elements in `input` must be complex numbers of the form \\(a +
bj\\), where *a* is the real part and *b* is the imaginary part returned by
this operation.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.imag(input) ==> [4.75, 5.75]
```
Args:
input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Imag", [input]) as name:
return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
def round(x, name=None):
"""Rounds the values of a tensor to the nearest integer, element-wise.
  Rounds half to even, also known as banker's rounding; note that the current
  implementation computes `floor(x + 0.5)` until the dedicated Round op is
  ready (see the TODO below), so exact halves round up for now. If you want to
  round according to the current system rounding mode use tf::cint.
For example:
```python
# 'a' is [0.9, 2.5, 2.3, 1.5, -4.5]
tf.round(a) ==> [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
```
Args:
x: A `Tensor` of type `float32` or `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as `x`.
"""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return x
else:
# TODO(nolivia): Switch to new Round op
# return gen_math_ops.round(x, name=name)
return gen_math_ops.floor(x + 0.5, name=name)
ops.RegisterShape("Round")(common_shapes.call_cpp_shape_fn)
def cast(x, dtype, name=None):
"""Casts a tensor to a new type.
The operation casts `x` (in case of `Tensor`) or `x.values`
(in case of `SparseTensor`) to `dtype`.
For example:
```python
# tensor `a` is [1.8, 2.2], dtype=tf.float
tf.cast(a, tf.int32) ==> [1, 2] # dtype=tf.int32
```
Args:
x: A `Tensor` or `SparseTensor`.
dtype: The destination type.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x`.
Raises:
TypeError: If `x` cannot be cast to the `dtype`.
"""
base_type = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "Cast", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
values_cast = cast(x.values, base_type, name=name)
return sparse_tensor.SparseTensor(x.indices, values_cast, x.shape)
else:
# TODO(touts): Handle what Josh said.
#
# Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
# allows some conversions that cast() can't do, e.g. casting numbers to
# strings.
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype == base_type:
return x
return gen_math_ops.cast(x, base_type, name=name)
def saturate_cast(value, dtype, name=None):
"""Performs a safe saturating cast of `value` to `dtype`.
This function casts the input to `dtype` without applying any scaling. If
there is a danger that values would over or underflow in the cast, this op
applies the appropriate clamping before the cast.
Args:
value: A `Tensor`.
dtype: The desired output `DType`.
name: A name for the operation (optional).
Returns:
`value` safely cast to `dtype`.
"""
# When casting to a type with smaller representable range, clamp.
# Note that this covers casting to unsigned types as well.
with ops.name_scope(name, "saturate_cast", [value]) as name:
value = ops.convert_to_tensor(value, name="value")
dtype = dtypes.as_dtype(dtype).base_dtype
if value.dtype.min < dtype.min:
value = gen_math_ops.maximum(value, ops.convert_to_tensor(
dtype.min, dtype=value.dtype, name="min"))
if value.dtype.max > dtype.max:
value = gen_math_ops.minimum(value, ops.convert_to_tensor(
dtype.max, dtype=value.dtype, name="max"))
return cast(value, dtype, name=name)
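# A minimal sketch of the clamping behavior (illustrative values):
#   x = constant_op.constant([-20.0, 130.0, 300.0])
#   saturate_cast(x, dtypes.uint8)   # ==> [0, 130, 255], no wrap-around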
def to_float(x, name="ToFloat"):
"""Casts a tensor to type `float32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.
Raises:
TypeError: If `x` cannot be cast to the `float32`.
"""
return cast(x, dtypes.float32, name=name)
def to_double(x, name="ToDouble"):
"""Casts a tensor to type `float64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.
Raises:
TypeError: If `x` cannot be cast to the `float64`.
"""
return cast(x, dtypes.float64, name=name)
def to_int32(x, name="ToInt32"):
"""Casts a tensor to type `int32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.
Raises:
TypeError: If `x` cannot be cast to the `int32`.
"""
return cast(x, dtypes.int32, name=name)
def to_int64(x, name="ToInt64"):
"""Casts a tensor to type `int64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.
Raises:
TypeError: If `x` cannot be cast to the `int64`.
"""
return cast(x, dtypes.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
"""Casts a tensor to type `bfloat16`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.
Raises:
TypeError: If `x` cannot be cast to the `bfloat16`.
"""
return cast(x, dtypes.bfloat16, name=name)
ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
"""Register operators with different tensor and scalar versions.
If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.
Args:
func: the operator
op_name: name of the operator being overridden
clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
"""
def binary_op_wrapper(x, y):
with ops.name_scope(None, op_name, [x, y]) as name:
if not isinstance(y, sparse_tensor.SparseTensor):
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
return func(x, y, name=name)
def binary_op_wrapper_sparse(sp_x, y):
with ops.name_scope(None, op_name, [sp_x, y]) as name:
y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
return sparse_tensor.SparseTensor(
sp_x.indices, func(sp_x.indices, sp_x.values,
sp_x.shape, y, name=name),
sp_x.shape)
def r_binary_op_wrapper(y, x):
with ops.name_scope(None, op_name, [x, y]) as name:
x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
return func(x, y, name=name)
# Propagate func.__doc__ to the wrappers
try:
doc = func.__doc__
except AttributeError:
doc = None
binary_op_wrapper.__doc__ = doc
r_binary_op_wrapper.__doc__ = doc
binary_op_wrapper_sparse.__doc__ = doc
if clazz_object is ops.Tensor:
clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
del binary_op_wrapper
clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
del r_binary_op_wrapper
else:
clazz_object._override_operator("__%s__" % op_name,
binary_op_wrapper_sparse)
del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
dtypes.uint8: dtypes.float32,
dtypes.int8: dtypes.float32,
dtypes.uint16: dtypes.float32,
dtypes.int16: dtypes.float32,
dtypes.int32: dtypes.float64,
dtypes.int64: dtypes.float64,
dtypes.float16: None,
dtypes.float32: None,
dtypes.float64: None,
dtypes.complex64: None,
dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
"""Internal helper function for 'sp_t / dense_t'."""
with ops.name_scope(name, "truediv",
[sp_indices, sp_values, sp_shape, y]) as name:
sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
y = ops.convert_to_tensor(y, name="y")
x_dtype = sp_values.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
sp_values = cast(sp_values, dtype)
y = cast(y, dtype)
return gen_sparse_ops.sparse_dense_cwise_div(sp_indices, sp_values,
sp_shape, y, name=name)
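# A minimal sketch (per the NOTE above, this path is reached through the
# overloaded "/" operator rather than tf.truediv()):
#   sp = sparse_tensor.SparseTensor(indices=[[0, 0]], values=[3], shape=[1, 2])
#   sp / constant_op.constant([[2, 2]])   # SparseTensor, float64 value 1.5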
def truediv(x, y, name=None):
"""Divides x / y elementwise, always producing floating point results.
The same as `tf.div` for floating point arguments, but casts integer arguments
to floating point before dividing so that the result is always floating point.
This op is generated by normal `x / y` division in Python 3 and in Python 2.7
with `from __future__ import division`. If you want integer division that
rounds down, use `x // y` or `tf.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
and `int64` (matching the behavior of Numpy).
Args:
x: `Tensor` numerator of numeric type.
y: `Tensor` denominator of numeric type.
name: A name for the operation (optional).
Returns:
`x / y` evaluated in floating point.
Raises:
TypeError: If `x` and `y` have different dtypes.
"""
with ops.name_scope(name, "truediv", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y")
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
x = cast(x, dtype)
y = cast(y, dtype)
return gen_math_ops.div(x, y, name=name)
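# A minimal sketch of the promotion table above (illustrative values):
#   truediv(int32 [3], int32 [2])   # ==> float64 [1.5]
#   truediv(int16 [3], int16 [2])   # ==> float32 [1.5]
#   truediv(float32 x, float32 y)   # no cast, plain elementwise division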
def floordiv(x, y, name=None):
"""Divides `x / y` elementwise, rounding down for floating point.
The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for
floating point arguments so that the result is always an integer (though
possibly an integer represented as floating point). This op is generated by
`x // y` floor division in Python 3 and in Python 2.7 with
`from __future__ import division`.
Note that for efficiency, `floordiv` uses C semantics for negative numbers
(unlike Python and Numpy).
`x` and `y` must have the same type, and the result will have the same type
as well.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` rounded down (except possibly towards zero for negative integers).
Raises:
TypeError: If the inputs are complex.
"""
with ops.name_scope(name, "floordiv", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
dtype = x.dtype
if dtype.is_floating:
return gen_math_ops.floor(gen_math_ops.div(x, y), name=name)
else:
if not dtype.is_integer:
raise TypeError("Expected floating point or integer, got %r" % dtype)
# TODO(aselle): Switch to math_ops.floor_div() when ready
# return gen_math_ops.floor_div(x, y, name=name)
return gen_math_ops.div(x, y, name=name)
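# A minimal sketch of the C semantics noted above (illustrative values):
#   floordiv(constant_op.constant(-7), constant_op.constant(2))  # ==> -3
# i.e. integer division truncates toward zero here, while Python's -7 // 2
# rounds down to -4.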
def _mul_dispatch(x, y, name=None):
"""Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
is_tensor_y = isinstance(y, ops.Tensor)
if is_tensor_y:
return gen_math_ops.mul(x, y, name=name)
else:
assert isinstance(y, sparse_tensor.SparseTensor) # Case: Dense * Sparse.
new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
y.shape, x, name)
return sparse_tensor.SparseTensor(y.indices, new_vals, y.shape)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(gen_math_ops.div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
# TODO(aselle): Switch mod to floor_mod when ready
# _OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
_OverrideBinaryOperatorHelper(gen_math_ops.mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
def logical_xor(x, y, name="LogicalXor"):
"""x ^ y = (x | y) & ~(x & y)."""
# TODO(alemi) Make this a cwise op if people end up relying on it.
return gen_math_ops.logical_and(
gen_math_ops.logical_or(x, y),
gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
name=name)
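# A minimal sketch (truth table for the and/or/not composition above):
#   x = [True, True, False, False]
#   y = [True, False, True, False]
#   logical_xor(x, y)   # ==> [False, True, True, False]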
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
def range(start, limit=None, delta=1, dtype=None, name="range"):
"""Creates a sequence of numbers.
Creates a sequence of numbers that begins at `start` and extends by
increments of `delta` up to but not including `limit`.
The dtype of the resulting tensor is inferred from the inputs unless
it is provided explicitly.
Like the Python builtin `range`, `start` defaults to 0, so that
`range(n) = range(0, n)`.
For example:
```python
# 'start' is 3
# 'limit' is 18
# 'delta' is 3
tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
# 'start' is 3
# 'limit' is 1
# 'delta' is -0.5
tf.range(start, limit, delta) ==> [3, 2.5, 2, 1.5]
# 'limit' is 5
tf.range(limit) ==> [0, 1, 2, 3, 4]
```
Args:
start: A 0-D `Tensor` (scalar). Acts as first entry in the range if
`limit` is not None; otherwise, acts as range limit and first entry
defaults to 0.
limit: A 0-D `Tensor` (scalar). Upper limit of sequence,
exclusive. If None, defaults to the value of `start` while the first
entry of the range defaults to 0.
delta: A 0-D `Tensor` (scalar). Number that increments
`start`. Defaults to 1.
dtype: The type of the elements of the resulting tensor.
name: A name for the operation. Defaults to "range".
Returns:
    A 1-D `Tensor` of type `dtype`.
"""
if limit is None:
start, limit = 0, start
with ops.name_scope(name, "Range", [start, limit, delta]) as name:
start = ops.convert_to_tensor(start, dtype=dtype, name="start")
limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
# infer dtype if not explicitly provided
if dtype is None:
dtype_hierarchy = [dtypes.int32, dtypes.int64, dtypes.float32,
dtypes.float64]
assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],
key=dtype_hierarchy.index)
start = cast(start, inferred_dtype)
limit = cast(limit, inferred_dtype)
delta = cast(delta, inferred_dtype)
return gen_math_ops._range(start, limit, delta, name=name)
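# A minimal sketch of the dtype inference above (illustrative values):
#   range(5)        # all int32 inputs       ==> int32 [0, 1, 2, 3, 4]
#   range(0, 5.0)   # float32 limit "wins"   ==> float32 [0., 1., 2., 3., 4.]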
@ops.RegisterShape("Range")
def _RangeShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[0, 1, 2])
# Reduction operations
def _ReductionDims(x, reduction_indices):
"""Returns range(0, rank(x)) if reduction_indices is None."""
if reduction_indices is not None:
return reduction_indices
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(np.arange(x.get_shape().ndims),
dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.shape.get_shape().is_fully_defined()):
      rank = x.shape.get_shape()[0].value  # sparse.shape is a 1-D tensor.
return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return range(0, array_ops.rank(x))
def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the sum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
  # 'x' is [[1, 1, 1]
  #         [1, 1, 1]]
tf.reduce_sum(x) ==> 6
tf.reduce_sum(x, 0) ==> [2, 2, 2]
tf.reduce_sum(x, 1) ==> [3, 3]
tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
tf.reduce_sum(x, [0, 1]) ==> 6
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._sum(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def count_nonzero(input_tensor, reduction_indices=None, keep_dims=False,
dtype=dtypes.int64, name=None):
"""Computes number of nonzero elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
**NOTE** Floating point comparison to zero is done by exact floating point
equality check. Small values are **not** rounded to zero for purposes of
the nonzero check.
For example:
```python
  # 'x' is [[0, 1, 0]
  #         [1, 1, 0]]
tf.count_nonzero(x) ==> 3
tf.count_nonzero(x, 0) ==> [1, 2, 0]
tf.count_nonzero(x, 1) ==> [1, 2]
tf.count_nonzero(x, 1, keep_dims=True) ==> [[1], [2]]
tf.count_nonzero(x, [0, 1]) ==> 3
```
Args:
input_tensor: The tensor to reduce. Should be of numeric type, or `bool`.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
dtype: The output dtype; defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
The reduced tensor (number of nonzero values).
"""
with ops.name_scope(name, "count_nonzero", [input_tensor]):
input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
zero = input_tensor.dtype.as_numpy_dtype()
return cast(
reduce_sum(
# int64 reduction happens on GPU
to_int64(gen_math_ops.not_equal(input_tensor, zero)),
reduction_indices=reduction_indices,
keep_dims=keep_dims),
dtype=dtype)
def reduce_mean(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
  # 'x' is [[1., 1.]
  #         [2., 2.]]
tf.reduce_mean(x) ==> 1.5
tf.reduce_mean(x, 0) ==> [1.5, 1.5]
tf.reduce_mean(x, 1) ==> [1., 2.]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._mean(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_prod(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the product of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._prod(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_min(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the minimum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._min(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_max(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the maximum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._max(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_all(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
  # 'x' is [[True,  True]
  #         [False, False]]
tf.reduce_all(x) ==> False
tf.reduce_all(x, 0) ==> [False, False]
tf.reduce_all(x, 1) ==> [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._all(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_any(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
  # 'x' is [[True,  True]
  #         [False, False]]
tf.reduce_any(x) ==> True
tf.reduce_any(x, 0) ==> [True, True]
tf.reduce_any(x, 1) ==> [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._any(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_logsumexp(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.
For example:
```python
  # 'x' is [[0, 0, 0],
  #         [0, 0, 0]]
tf.reduce_logsumexp(x) ==> log(6)
tf.reduce_logsumexp(x, 0) ==> [log(2), log(2), log(2)]
tf.reduce_logsumexp(x, 1) ==> [log(3), log(3)]
tf.reduce_logsumexp(x, 1, keep_dims=True) ==> [[log(3)], [log(3)]]
tf.reduce_logsumexp(x, [0, 1]) ==> log(6)
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
my_max = array_ops.stop_gradient(
reduce_max(input_tensor, reduction_indices, keep_dims=True))
result = gen_math_ops.log(reduce_sum(
gen_math_ops.exp(input_tensor - my_max),
reduction_indices,
keep_dims=True)) + my_max
if not keep_dims:
if isinstance(reduction_indices, int):
reduction_indices = [reduction_indices]
result = array_ops.squeeze(result, reduction_indices)
return result
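# A sketch of the identity behind the implementation above: with m = max(x),
#   log(sum(exp(x))) == m + log(sum(exp(x - m)))
# e.g. for x = [1000., 1000.] the naive form overflows exp(), while the
# shifted form yields 1000 + log(2).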
def trace(x, name=None):
""" Compute the trace of a tensor `x`.
`trace(x)` returns the sum along the main diagonal of each inner-most matrix
in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where
`output[i, j, k, ..., l] = trace(x[i, j, i, ..., l, :, :])`
For example:
```python
  # 'x' is [[1, 2],
  #         [3, 4]]
  tf.trace(x) ==> 5
  # 'x' is [[1, 2, 3],
  #         [4, 5, 6],
  #         [7, 8, 9]]
  tf.trace(x) ==> 15
  # 'x' is [[[1, 2, 3],
  #          [4, 5, 6],
  #          [7, 8, 9]],
  #         [[-1, -2, -3],
  #          [-4, -5, -6],
  #          [-7, -8, -9]]]
  tf.trace(x) ==> [15, -15]
```
Args:
x: tensor.
name: A name for the operation (optional).
Returns:
The trace of input tensor.
"""
with ops.name_scope(name, "Trace", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
def matmul(a, b,
transpose_a=False, transpose_b=False,
a_is_sparse=False, b_is_sparse=False,
name=None):
"""Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
The inputs must be two-dimensional matrices, with matching inner dimensions,
possibly after transposition.
Both matrices must be of the same type. The supported types are:
`float32`, `float64`, `int32`, `complex64`.
Either matrix can be transposed on the fly by setting the corresponding flag
to `True`. This is `False` by default.
If one or both of the matrices contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
`a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
For example:
```python
# 2-D tensor `a`
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1 2 3]
                                                      [4 5 6]]
# 2-D tensor `b`
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7 8]
                                                         [9 10]
                                                         [11 12]]
c = tf.matmul(a, b) => [[58 64]
[139 154]]
```
Args:
a: `Tensor` of type `float32`, `float64`, `int32` or `complex64`.
b: `Tensor` with same type as `a`.
transpose_a: If `True`, `a` is transposed before multiplication.
transpose_b: If `True`, `b` is transposed before multiplication.
a_is_sparse: If `True`, `a` is treated as a sparse matrix.
b_is_sparse: If `True`, `b` is treated as a sparse matrix.
name: Name for the operation (optional).
Returns:
A `Tensor` of the same type as `a`.
"""
with ops.name_scope(name, "MatMul", [a, b]) as name:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
use_sparse_matmul = (a.dtype in sparse_matmul_types and
b.dtype in sparse_matmul_types and
(a_is_sparse or b_is_sparse))
if dtypes.bfloat16 in (a.dtype, b.dtype):
# matmul currently doesn't handle bfloat16 inputs.
use_sparse_matmul = True
if use_sparse_matmul:
return sparse_matmul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
name=name)
else:
return gen_math_ops._mat_mul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
name=name)
sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul
ops.RegisterShape("MatMul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseMatMul")(common_shapes.call_cpp_shape_fn)
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
"""Calculates the compute resources needed for MatMul."""
transpose_a = node.attr["transpose_a"].b
a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
a_shape.assert_is_fully_defined()
if transpose_a:
k = int(a_shape[0])
else:
k = int(a_shape[1])
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (k * output_count * 2))
def _as_indexed_slices(x, optimize=True):
"""Convert 'x' to IndexedSlices.
Convert a dense Tensor to a block-sparse IndexedSlices.
Args:
x: Either a Tensor object, or an IndexedSlices object.
optimize: if true, attempt to optimize the conversion of 'x'.
Returns:
An IndexedSlices object.
Raises:
TypeError: If 'x' is not a Tensor or an IndexedSlices object.
"""
# TODO(touts): op_scope
if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
if isinstance(x, ops.IndexedSlices):
return x
x_shape = array_ops.shape_internal(x, optimize=optimize)
return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
"""Convert all elements of 'inputs' to IndexedSlices.
Additionally, homogenize the types of all the indices to
either int32 or int64.
Args:
inputs: List containing either Tensor or IndexedSlices objects.
optimize: if true, attempt to optimize the conversion of each input.
Returns:
A list of IndexedSlices objects.
Raises:
TypeError: If 'inputs' is not a list or a tuple.
"""
if not isinstance(inputs, (list, tuple)):
raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
with_int32_index = [o.indices for o in outputs
if o.indices.dtype == dtypes.int32]
if not with_int32_index or len(with_int32_index) == len(outputs):
return outputs
casted_outputs = []
for o in outputs:
if o.indices.dtype == dtypes.int32:
casted_outputs.append(
ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
o.dense_shape))
else:
casted_outputs.append(o)
return casted_outputs
def add_n(inputs, name=None):
"""Adds all input tensors element-wise.
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if len(inputs) == 1:
if name:
return array_ops.identity(inputs[0], name=name)
return inputs[0]
return gen_math_ops._add_n(inputs, name=name)
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
"""Returns the element-wise sum of a list of tensors.
Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
otherwise, these are inferred.
NOTE: This operation is not differentiable and cannot be used if inputs depend
on trainable variables. Please use `tf.add_n` for such cases.
For example:
```python
# tensor 'a' is [[1, 2], [3, 4]]
# tensor 'b' is [[5, 0], [0, 6]]
tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]
# Explicitly pass shape and type
tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
==> [[7, 4], [6, 14]]
```
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
shape: Shape of elements of `inputs`.
tensor_dtype: The type of `inputs`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if not all(x.dtype == inputs[0].dtype for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if shape is not None:
shape = tensor_shape.as_shape(shape)
else:
shape = tensor_shape.unknown_shape()
for input_tensor in inputs:
if isinstance(input_tensor, ops.Tensor):
shape = shape.merge_with(input_tensor.get_shape())
if len(inputs) == 1:
return inputs[0]
if tensor_dtype is None:
tensor_dtype = inputs[0].dtype
with ops.name_scope(name, "AccumulateN", inputs) as name:
var = gen_state_ops._temporary_variable(shape=tensor_shape.vector(0),
dtype=tensor_dtype)
with ops.colocate_with(var):
zeros = array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0])
zeros.set_shape(shape)
ref = state_ops.assign(var, zeros, validate_shape=False)
update_ops = [state_ops.assign_add(ref, input_tensor, use_locking=True)
for input_tensor in inputs]
with ops.control_dependencies(update_ops):
return gen_state_ops._destroy_temporary_variable(
ref, var_name=var.op.name, name=name)
ops.RegisterShape("BatchMatMul")(common_shapes.call_cpp_shape_fn)
def sigmoid(x, name=None):
"""Computes sigmoid of `x` element-wise.
Specifically, `y = 1 / (1 + exp(-x))`.
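For example (an illustrative sketch; outputs shown rounded):
```python
# 'x' is [-1.0, 0.0, 1.0]
tf.sigmoid(x) ==> [0.26894143, 0.5, 0.7310586]
```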
Args:
x: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`,
or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x` if `x.dtype != qint32`,
otherwise the return type is `quint8`.
"""
with ops.name_scope(name, "Sigmoid", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._sigmoid(x, name=name)
def tanh(x, name=None):
"""Computes hyperbolic tangent of `x` element-wise.
Args:
x: A Tensor or SparseTensor with type `float`, `double`, `int32`,
`complex64`, `int64`, or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor or SparseTensor respectively with the same type as `x` if
`x.dtype != qint32`, otherwise the return type is `quint8`.
"""
with ops.name_scope(name, "Tanh", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_tanh = gen_math_ops._tanh(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_tanh, shape=x.shape)
else:
return gen_math_ops._tanh(x, name=name)
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative sum of the tensor `x` along `axis`.
By default, this op performs an inclusive cumsum, which means that the first
element of the input is identical to the first element of the output:
```prettyprint
tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c]
```
By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
instead:
```prettyprint
tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
```
By setting the `reverse` kwarg to `True`, the cumsum is performed in the
opposite direction:
```prettyprint
tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```prettyprint
tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
axis: A `Tensor` of type `int32` (default: 0).
exclusive: If `True`, perform exclusive cumsum (default: False).
reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumsum", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumsum(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative product of the tensor `x` along `axis`.
By default, this op performs an inclusive cumprod, which means that the
first element of the input is identical to the first element of the output:
```prettyprint
tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c]
```
By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
performed instead:
```prettyprint
tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b]
```
By setting the `reverse` kwarg to `True`, the cumprod is performed in the
opposite direction:
```prettyprint
tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```prettyprint
tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
axis: A `Tensor` of type `int32` (default: 0).
exclusive: If `True`, perform exclusive cumprod (default: False).
reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumprod", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumprod(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
def conj(x, name=None):
r"""Returns the complex conjugate of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
complex numbers that are the complex conjugate of each element in `input`. The
complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
real part and *b* is the imaginary part.
The complex conjugate returned by this operation is of the form \\(a - bj\\).
For example:
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
If `x` is real, it is returned unchanged.
Args:
x: `Tensor` to conjugate. Must have numeric type.
name: A name for the operation (optional).
Returns:
A `Tensor` that is the conjugate of `x` (with the same type).
Raises:
TypeError: If `x` is not a numeric tensor.
"""
with ops.name_scope(name, "Conj", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_complex:
return gen_math_ops._conj(x, name=name)
elif x.dtype.is_floating or x.dtype.is_integer:
return x
else:
raise TypeError("Expected numeric tensor, got dtype %r" % x.dtype)
ops.RegisterShape("Abs")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Acos")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Asin")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Atan")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Ceil")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Conj")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cos")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cross")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Exp")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Floor")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Imag")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Inv")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IsFinite")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IsInf")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IsNan")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Log")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Log1p")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LogicalNot")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Neg")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Real")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Rsqrt")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sign")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sin")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sqrt")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Square")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sigmoid")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Tanh")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Tan")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Lgamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Digamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Erf")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Erfc")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cast")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ComplexAbs")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FFT")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IFFT")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FFT2D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IFFT2D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FFT3D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IFFT3D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("TanhGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SigmoidGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("InvGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SqrtGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("RsqrtGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cumsum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cumprod")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Add")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Complex")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Div")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Equal")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Greater")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("GreaterEqual")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Igamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Igammac")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Zeta")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Polygamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Less")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LessEqual")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LogicalAnd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LogicalOr")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Maximum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Minimum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Mod")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FloorMod")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FloorDiv")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Mul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("NotEqual")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Pow")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sub")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SquaredDifference")(common_shapes.call_cpp_shape_fn)
def _BroadcastShape(op):
"""Common shape function for binary operators that broadcast their inputs."""
return [common_shapes.broadcast_shape(
op.inputs[0].get_shape(),
op.inputs[1].get_shape())]
ops.RegisterShape("Betainc")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseDenseCwiseMul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseDenseCwiseDiv")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseDenseCwiseAdd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("AddN")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Select")(common_shapes.call_cpp_shape_fn)
@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1])
@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1])
ops.RegisterShape("SegmentMax")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentMean")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentMin")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentProd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentSum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseSegmentMean")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseSegmentSqrtN")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseSegmentSum")(common_shapes.call_cpp_shape_fn)
@ops.RegisterShape("SparseSegmentMeanGrad")
@ops.RegisterShape("SparseSegmentSqrtNGrad")
# pylint: disable=invalid-name
def _SparseSegmentReductionGradShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[3])
# pylint: enable=invalid-name
@ops.RegisterShape("UnsortedSegmentSum")
def _UnsortedSegmentSumShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2])
@ops.RegisterShape("LinSpace")
def _LinspaceShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2])
def reduced_shape(input_shape, axes):
"""Helper function for reduction ops.
Args:
input_shape: 1-D Tensor, the shape of the Tensor being reduced.
axes: 1-D Tensor, the reduction axes.
Returns:
A 1-D Tensor, the output shape as if keep_dims were set to True.
"""
# Example:
# cast needed for SparseTensor reductions
input_shape = to_int32(input_shape) # [2, 3, 5, 7]
axes = to_int32(axes) # [1, 2]
input_rank = array_ops.size(input_shape) # 4
axes = (axes + input_rank) % input_rank
axes_shape = array_ops.shape(axes) # [2]
return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7]
[range(input_rank), # [0, 1, 2, 3]
axes], # [1, 2]
[input_shape, # [2, 3, 5, 7]
array_ops.fill(axes_shape, 1)]) # [1, 1]
ops.RegisterShape("QuantizedMatMul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Requantize")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("RequantizationRange")(common_shapes.call_cpp_shape_fn)
| 33.930221 | 86 | 0.683119 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.gen_math_ops import *
argmax = gen_math_ops.arg_max
argmin = gen_math_ops.arg_min
linspace = gen_math_ops.lin_space
def abs(x, name=None):
with ops.name_scope(name, "Abs", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
if x.values.dtype in (dtypes.complex64, dtypes.complex128):
x_abs = gen_math_ops.complex_abs(x.values,
Tout=x.values.dtype.real_dtype, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_abs, shape=x.shape)
x_abs = gen_math_ops._abs(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_abs, shape=x.shape)
else:
x = ops.convert_to_tensor(x, name="x")
if x.dtype in (dtypes.complex64, dtypes.complex128):
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
return gen_math_ops._abs(x, name=name)
def divide(x, y, name=None):
with ops.name_scope(name, "Divide", [x]) as name:
return x / y
multiply = gen_math_ops.mul
subtract = gen_math_ops.sub
negative = gen_math_ops.neg
def neg(x, name=None):
with ops.name_scope(name, "Neg", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_neg = gen_math_ops.neg(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_neg, shape=x.shape)
else:
return gen_math_ops.neg(x, name=name)
def sign(x, name=None):
with ops.name_scope(name, "Sign", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_sign = gen_math_ops.sign(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_sign, shape=x.shape)
else:
return gen_math_ops.sign(x, name=name)
def square(x, name=None):
with ops.name_scope(name, "Square", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_square = gen_math_ops.square(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_square, shape=x.shape)
else:
return gen_math_ops.square(x, name=name)
def sqrt(x, name=None):
with ops.name_scope(name, "Sqrt", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_sqrt = gen_math_ops.sqrt(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_sqrt, shape=x.shape)
else:
return gen_math_ops.sqrt(x, name=name)
def erf(x, name=None):
with ops.name_scope(name, "Erf", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_erf = gen_math_ops.erf(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_erf, shape=x.shape)
else:
return gen_math_ops.erf(x, name=name)
def complex_abs(x, name=None):
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
def scalar_mul(scalar, x):
scalar = ops.convert_to_tensor(scalar, dtype=x.dtype.base_dtype,
name="scalar")
shape = scalar.get_shape()
if shape.ndims == 0:
if isinstance(x, ops.IndexedSlices):
return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)
else:
return scalar * x
else:
raise ValueError("Only scalar multiply works, got shape %s" % shape)
def pow(x, y, name=None):
with ops.name_scope(name, "Pow", [x]) as name:
return gen_math_ops._pow(x, y, name=name)
def complex(real, imag, name=None):
real = ops.convert_to_tensor(real, name="real")
imag = ops.convert_to_tensor(imag, name="imag")
with ops.name_scope(name, "Complex", [real, imag]) as name:
input_types = (real.dtype, imag.dtype)
if input_types == (dtypes.float64, dtypes.float64):
Tout = dtypes.complex128
elif input_types == (dtypes.float32, dtypes.float32):
Tout = dtypes.complex64
else:
raise TypeError("real and imag have incorrect types: "
"{} {}".format(real.dtype.name, imag.dtype.name))
return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
def real(input, name=None):
with ops.name_scope(name, "Real", [input]) as name:
real_dtype = input.dtype.real_dtype
if input.dtype.base_dtype == real_dtype:
return input
return gen_math_ops.real(input, Tout=real_dtype, name=name)
def imag(input, name=None):
with ops.name_scope(name, "Imag", [input]) as name:
return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
def round(x, name=None):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return x
else:
return gen_math_ops.floor(x + 0.5, name=name)
ops.RegisterShape("Round")(common_shapes.call_cpp_shape_fn)
def cast(x, dtype, name=None):
base_type = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "Cast", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
values_cast = cast(x.values, base_type, name=name)
return sparse_tensor.SparseTensor(x.indices, values_cast, x.shape)
else:
# strings.
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype == base_type:
return x
return gen_math_ops.cast(x, base_type, name=name)
def saturate_cast(value, dtype, name=None):
# When casting to a type with smaller representable range, clamp.
# Note that this covers casting to unsigned types as well.
with ops.name_scope(name, "saturate_cast", [value]) as name:
value = ops.convert_to_tensor(value, name="value")
dtype = dtypes.as_dtype(dtype).base_dtype
if value.dtype.min < dtype.min:
value = gen_math_ops.maximum(value, ops.convert_to_tensor(
dtype.min, dtype=value.dtype, name="min"))
if value.dtype.max > dtype.max:
value = gen_math_ops.minimum(value, ops.convert_to_tensor(
dtype.max, dtype=value.dtype, name="max"))
return cast(value, dtype, name=name)
def to_float(x, name="ToFloat"):
return cast(x, dtypes.float32, name=name)
def to_double(x, name="ToDouble"):
return cast(x, dtypes.float64, name=name)
def to_int32(x, name="ToInt32"):
return cast(x, dtypes.int32, name=name)
def to_int64(x, name="ToInt64"):
return cast(x, dtypes.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
return cast(x, dtypes.bfloat16, name=name)
ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
def binary_op_wrapper(x, y):
with ops.name_scope(None, op_name, [x, y]) as name:
if not isinstance(y, sparse_tensor.SparseTensor):
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
return func(x, y, name=name)
def binary_op_wrapper_sparse(sp_x, y):
with ops.name_scope(None, op_name, [sp_x, y]) as name:
y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
return sparse_tensor.SparseTensor(
sp_x.indices, func(sp_x.indices, sp_x.values,
sp_x.shape, y, name=name),
sp_x.shape)
def r_binary_op_wrapper(y, x):
with ops.name_scope(None, op_name, [x, y]) as name:
x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
return func(x, y, name=name)
# Propagate func.__doc__ to the wrappers
try:
doc = func.__doc__
except AttributeError:
doc = None
binary_op_wrapper.__doc__ = doc
r_binary_op_wrapper.__doc__ = doc
binary_op_wrapper_sparse.__doc__ = doc
if clazz_object is ops.Tensor:
clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
del binary_op_wrapper
clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
del r_binary_op_wrapper
else:
clazz_object._override_operator("__%s__" % op_name,
binary_op_wrapper_sparse)
del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
dtypes.uint8: dtypes.float32,
dtypes.int8: dtypes.float32,
dtypes.uint16: dtypes.float32,
dtypes.int16: dtypes.float32,
dtypes.int32: dtypes.float64,
dtypes.int64: dtypes.float64,
dtypes.float16: None,
dtypes.float32: None,
dtypes.float64: None,
dtypes.complex64: None,
dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
with ops.name_scope(name, "truediv",
[sp_indices, sp_values, sp_shape, y]) as name:
sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
y = ops.convert_to_tensor(y, name="y")
x_dtype = sp_values.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
sp_values = cast(sp_values, dtype)
y = cast(y, dtype)
return gen_sparse_ops.sparse_dense_cwise_div(sp_indices, sp_values,
sp_shape, y, name=name)
def truediv(x, y, name=None):
with ops.name_scope(name, "truediv", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y")
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
x = cast(x, dtype)
y = cast(y, dtype)
return gen_math_ops.div(x, y, name=name)
def floordiv(x, y, name=None):
with ops.name_scope(name, "floordiv", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
dtype = x.dtype
if dtype.is_floating:
return gen_math_ops.floor(gen_math_ops.div(x, y), name=name)
else:
if not dtype.is_integer:
raise TypeError("Expected floating point or integer, got %r" % dtype)
# TODO(aselle): Switch to math_ops.floor_div() when ready
# return gen_math_ops.floor_div(x, y, name=name)
return gen_math_ops.div(x, y, name=name)
def _mul_dispatch(x, y, name=None):
is_tensor_y = isinstance(y, ops.Tensor)
if is_tensor_y:
return gen_math_ops.mul(x, y, name=name)
else:
assert isinstance(y, sparse_tensor.SparseTensor) # Case: Dense * Sparse.
new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
y.shape, x, name)
return sparse_tensor.SparseTensor(y.indices, new_vals, y.shape)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(gen_math_ops.div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
# TODO(aselle): Switch mod to floor_mod when ready
# _OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
_OverrideBinaryOperatorHelper(gen_math_ops.mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
def logical_xor(x, y, name="LogicalXor"):
# TODO(alemi) Make this a cwise op if people end up relying on it.
return gen_math_ops.logical_and(
gen_math_ops.logical_or(x, y),
gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
name=name)
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
def range(start, limit=None, delta=1, dtype=None, name="range"):
if limit is None:
start, limit = 0, start
with ops.name_scope(name, "Range", [start, limit, delta]) as name:
start = ops.convert_to_tensor(start, dtype=dtype, name="start")
limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
# infer dtype if not explicitly provided
if dtype is None:
dtype_hierarchy = [dtypes.int32, dtypes.int64, dtypes.float32,
dtypes.float64]
assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],
key=dtype_hierarchy.index)
start = cast(start, inferred_dtype)
limit = cast(limit, inferred_dtype)
delta = cast(delta, inferred_dtype)
return gen_math_ops._range(start, limit, delta, name=name)
@ops.RegisterShape("Range")
def _RangeShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[0, 1, 2])
# Reduction operations
def _ReductionDims(x, reduction_indices):
if reduction_indices is not None:
return reduction_indices
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(np.arange(x.get_shape().ndims),
dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.shape.get_shape().is_fully_defined()):
      rank = x.shape.get_shape()[0].value  # sparse.shape is a 1-D tensor.
return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return range(0, array_ops.rank(x))
def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
return gen_math_ops._sum(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def count_nonzero(input_tensor, reduction_indices=None, keep_dims=False,
dtype=dtypes.int64, name=None):
with ops.name_scope(name, "count_nonzero", [input_tensor]):
input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
zero = input_tensor.dtype.as_numpy_dtype()
return cast(
reduce_sum(
# int64 reduction happens on GPU
to_int64(gen_math_ops.not_equal(input_tensor, zero)),
reduction_indices=reduction_indices,
keep_dims=keep_dims),
dtype=dtype)
def reduce_mean(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
return gen_math_ops._mean(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_prod(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
return gen_math_ops._prod(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_min(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
return gen_math_ops._min(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_max(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
return gen_math_ops._max(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_all(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
return gen_math_ops._all(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_any(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
return gen_math_ops._any(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_logsumexp(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
my_max = array_ops.stop_gradient(
reduce_max(input_tensor, reduction_indices, keep_dims=True))
result = gen_math_ops.log(reduce_sum(
gen_math_ops.exp(input_tensor - my_max),
reduction_indices,
keep_dims=True)) + my_max
if not keep_dims:
if isinstance(reduction_indices, int):
reduction_indices = [reduction_indices]
result = array_ops.squeeze(result, reduction_indices)
return result
def trace(x, name=None):
with ops.name_scope(name, "Trace", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
def matmul(a, b,
transpose_a=False, transpose_b=False,
a_is_sparse=False, b_is_sparse=False,
name=None):
with ops.name_scope(name, "MatMul", [a, b]) as name:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
use_sparse_matmul = (a.dtype in sparse_matmul_types and
b.dtype in sparse_matmul_types and
(a_is_sparse or b_is_sparse))
if dtypes.bfloat16 in (a.dtype, b.dtype):
# matmul currently doesn't handle bfloat16 inputs.
use_sparse_matmul = True
if use_sparse_matmul:
return sparse_matmul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
name=name)
else:
return gen_math_ops._mat_mul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
name=name)
sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul
ops.RegisterShape("MatMul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseMatMul")(common_shapes.call_cpp_shape_fn)
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
transpose_a = node.attr["transpose_a"].b
a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
a_shape.assert_is_fully_defined()
if transpose_a:
k = int(a_shape[0])
else:
k = int(a_shape[1])
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (k * output_count * 2))
def _as_indexed_slices(x, optimize=True):
if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
if isinstance(x, ops.IndexedSlices):
return x
x_shape = array_ops.shape_internal(x, optimize=optimize)
return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
if not isinstance(inputs, (list, tuple)):
raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
with_int32_index = [o.indices for o in outputs
if o.indices.dtype == dtypes.int32]
if not with_int32_index or len(with_int32_index) == len(outputs):
return outputs
casted_outputs = []
for o in outputs:
if o.indices.dtype == dtypes.int32:
casted_outputs.append(
ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
o.dense_shape))
else:
casted_outputs.append(o)
return casted_outputs
def add_n(inputs, name=None):
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if len(inputs) == 1:
if name:
return array_ops.identity(inputs[0], name=name)
return inputs[0]
return gen_math_ops._add_n(inputs, name=name)
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if not all(x.dtype == inputs[0].dtype for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if shape is not None:
shape = tensor_shape.as_shape(shape)
else:
shape = tensor_shape.unknown_shape()
for input_tensor in inputs:
if isinstance(input_tensor, ops.Tensor):
shape = shape.merge_with(input_tensor.get_shape())
if len(inputs) == 1:
return inputs[0]
if tensor_dtype is None:
tensor_dtype = inputs[0].dtype
with ops.name_scope(name, "AccumulateN", inputs) as name:
var = gen_state_ops._temporary_variable(shape=tensor_shape.vector(0),
dtype=tensor_dtype)
with ops.colocate_with(var):
zeros = array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0])
zeros.set_shape(shape)
ref = state_ops.assign(var, zeros, validate_shape=False)
update_ops = [state_ops.assign_add(ref, input_tensor, use_locking=True)
for input_tensor in inputs]
with ops.control_dependencies(update_ops):
return gen_state_ops._destroy_temporary_variable(
ref, var_name=var.op.name, name=name)
ops.RegisterShape("BatchMatMul")(common_shapes.call_cpp_shape_fn)
def sigmoid(x, name=None):
with ops.name_scope(name, "Sigmoid", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._sigmoid(x, name=name)
def tanh(x, name=None):
with ops.name_scope(name, "Tanh", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_tanh = gen_math_ops._tanh(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_tanh, shape=x.shape)
else:
return gen_math_ops._tanh(x, name=name)
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
with ops.name_scope(name, "Cumsum", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumsum(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
with ops.name_scope(name, "Cumprod", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumprod(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
def conj(x, name=None):
with ops.name_scope(name, "Conj", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_complex:
return gen_math_ops._conj(x, name=name)
elif x.dtype.is_floating or x.dtype.is_integer:
return x
else:
raise TypeError("Expected numeric tensor, got dtype %r" % x.dtype)
ops.RegisterShape("Abs")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Acos")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Asin")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Atan")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Ceil")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Conj")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cos")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cross")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Exp")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Floor")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Imag")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Inv")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IsFinite")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IsInf")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IsNan")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Log")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Log1p")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LogicalNot")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Neg")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Real")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Rsqrt")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sign")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sin")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sqrt")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Square")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sigmoid")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Tanh")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Tan")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Lgamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Digamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Erf")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Erfc")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cast")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ComplexAbs")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FFT")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IFFT")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FFT2D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IFFT2D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FFT3D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IFFT3D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("TanhGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SigmoidGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("InvGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SqrtGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("RsqrtGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cumsum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cumprod")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Add")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Complex")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Div")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Equal")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Greater")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("GreaterEqual")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Igamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Igammac")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Zeta")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Polygamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Less")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LessEqual")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LogicalAnd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LogicalOr")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Maximum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Minimum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Mod")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FloorMod")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FloorDiv")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Mul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("NotEqual")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Pow")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sub")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SquaredDifference")(common_shapes.call_cpp_shape_fn)
def _BroadcastShape(op):
return [common_shapes.broadcast_shape(
op.inputs[0].get_shape(),
op.inputs[1].get_shape())]
ops.RegisterShape("Betainc")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseDenseCwiseMul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseDenseCwiseDiv")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseDenseCwiseAdd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("AddN")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Select")(common_shapes.call_cpp_shape_fn)
@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1])
@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1])
ops.RegisterShape("SegmentMax")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentMean")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentMin")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentProd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentSum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseSegmentMean")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseSegmentSqrtN")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseSegmentSum")(common_shapes.call_cpp_shape_fn)
@ops.RegisterShape("SparseSegmentMeanGrad")
@ops.RegisterShape("SparseSegmentSqrtNGrad")
def _SparseSegmentReductionGradShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[3])
@ops.RegisterShape("UnsortedSegmentSum")
def _UnsortedSegmentSumShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2])
@ops.RegisterShape("LinSpace")
def _LinspaceShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2])
def reduced_shape(input_shape, axes):
  input_shape = to_int32(input_shape)
  axes = to_int32(axes)
  input_rank = array_ops.size(input_shape)
  axes = (axes + input_rank) % input_rank
  axes_shape = array_ops.shape(axes)
  return gen_data_flow_ops.dynamic_stitch(
      [range(input_rank),
       axes],
      [input_shape,
       array_ops.fill(axes_shape, 1)])
ops.RegisterShape("QuantizedMatMul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Requantize")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("RequantizationRange")(common_shapes.call_cpp_shape_fn)
| true | true |
1c4a20e25214c64f1fee4492086b179d9de887ea | 5,520 | py | Python | imsngpy/gpwatch.py | SilverRon/imsngpy | e9e55a73403bef4c73dcc242735efc28d79a3066 | [
"MIT"
] | 1 | 2021-12-22T08:58:47.000Z | 2021-12-22T08:58:47.000Z | imsngpy/gpwatch.py | SilverRon/imsngpy | e9e55a73403bef4c73dcc242735efc28d79a3066 | [
"MIT"
] | null | null | null | imsngpy/gpwatch.py | SilverRon/imsngpy | e9e55a73403bef4c73dcc242735efc28d79a3066 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#============================================================
# Module
#------------------------------------------------------------
import os
import sys
import time
# IMSNGpy modules
sys.path.append('/home/paek/imsngpy')
from misc import *
# Astropy
from astropy.io import ascii
#============================================================
# Function
#------------------------------------------------------------
def get_size(start_path = '.'):
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
# skip if it is symbolic link
if not os.path.islink(fp):
total_size += os.path.getsize(fp)
return total_size
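# Example (hypothetical night directory):
#   get_size('/data6/obsdata/LOAO/2021_0101')
# returns the total byte count of all regular files under that path; the
# watcher loop below polls this value to decide when an upload has finished.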
#------------------------------------------------------------
# Path
#------------------------------------------------------------
path_obsdata = '/data6/obsdata'
path_table = '/home/paek/table'
path_log = '/home/paek/log'
path_gppy = '/home/paek/imsngpy/imsngpy'
path_check_rasa36 = '/home/paek/qsopy/monitor/classify_rasa36.py'
path_preprocess = f'{path_gppy}/pipeline.processing.py'
# Slack
keytbl = ascii.read(f'{path_table}/keys.dat')
OAuth_Token = keytbl['key'][keytbl['name']=='slack'].item()
#------------------------------------------------------------
# Data information
#------------------------------------------------------------
obsdict = dict(
# LOAO
loao=dict(
path_base='/data6/obsdata/LOAO',
path_new='',
log=f'{path_log}/loao.log',
size=0, # [bytes]
core=1, # 4
),
# DOAO
doao=dict(
path_base='/data6/obsdata/DOAO',
path_new='',
log=f'{path_log}/doao.log',
size=0, # [bytes]
core=1, # 4
),
# SOAO
soao=dict(
path_base='/data6/obsdata/SOAO',
path_new='',
log=f'{path_log}/soao.log',
size=0, # [bytes]
core=1, # 4
),
# CBNUO
cbnuo=dict(
path_base='/data6/obsdata/CBNUO', # ./2021_0101
path_new='',
log=f'{path_log}/cbnuo.log',
size=0, # [bytes]
core=1, # 4
),
# KHAO
khao=dict(
path_base='/data6/obsdata/KHAO', # ./2021_0101
path_new='',
log=f'{path_log}/khao.log',
size=0, # [bytes]
core=2, # 4
),
# MDFTS
mdfts=dict(
path_base='/data6/obsdata/MDFTS', # ./2021_0101
path_new='',
log=f'{path_log}/mdfts.log',
size=0, # [bytes]
core=2, # 4
),
# KCT_STX16803
kct_stx16803=dict(
path_base='/data6/obsdata/KCT_STX16803',
path_new='',
log=f'{path_log}/kct_stx16803.log',
size=0, # [bytes]
core=1, # 4
),
# RASA36
rasa36=dict(
path_base='/data6/obsdata/RASA36',
path_new='',
log=f'{path_log}/rasa36.log',
size=0, # [bytes]
core=1, # 4
),
)
#------------------------------------------------------------
obslist = ['LOAO', 'DOAO', 'SOAO', 'CBNUO', 'KHAO', 'KCT_STX16803', 'RASA36']
print('OBSERVATORY LIST: ', end='')
print(obslist)
obs = input('obs:').upper()
# obs = 'LOAO'
delay = 10
ncore = input('# of cores (e.g. 8):')
'''
print(f"Wrong input in variable 'sphere' (sphere={sphere})")
print('Process all obs. data')
obslist = ['loao', 'doao', 'soao', 'cbnuo',]+['kct_stx16803', 'rasa36']
'''
#============================================================
# Main body
#------------------------------------------------------------
print(f"{'='*60}\n\n[gpwatch/o_o] Watching new data for {obs} with {ncore} cores \n\n{'='*60}")
st = time.time()
while True:
try:
# Time
et = time.time()
delt = int(et - st)
h = delt // (60*60)
		m = (delt % 3600) // 60
s = delt % 60
timer = '{:02d}:{:02d}:{:02d}'.format(h, m, s)
print(timer, end="\r")
		log = obsdict[obs.lower()]['log']	# obsdict keys are lowercase
path_base = f"{path_obsdata}/{obs}"
#
logtbl = ascii.read(log)
dirlist = os.listdir(path_base)
#
for f in dirlist:
path_new = f"{path_base}/{f}"
if (path_new not in logtbl['date']) & (f"{path_new}/" not in logtbl['date']) & (os.path.isdir(path_new)):
print()
#------------------------------------------------------------
# Slack message
#------------------------------------------------------------
channel = '#pipeline'
text = f'[gpwatch/{obs}] Detected New {os.path.basename(path_new)} Data'
param_slack = dict(
token = OAuth_Token,
channel = channel,
text = text,
)
slack_bot(**param_slack)
#
				print(text)
init_size = get_size(path_new)
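				# Poll the directory size; treat two consecutive equal samples
				# as a sign that the upload has finished before processing.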
while True:
time.sleep(int(delay*2))
now_size = get_size(path_new)
if init_size != now_size:
print(f'Still uploading {os.path.basename(path_new)} : {init_size} --> {now_size}')
init_size = now_size
else:
# RASA36 exception
						if (obs.lower() == 'rasa36'):
com = f'python {path_check_rasa36} {path_new}'
print(com)
os.system(com)
if len(dirlist) == len(os.listdir(path_base)):
com = f"python {path_calib} {obs} {ncore}"
print(com)
os.system(com)
else:
break
else:
# Run python code
com = f"python {path_calib} {obs} {ncore}"
print(com)
os.system(com)
print(f"[gpwatch/{obs}] Process for {os.path.basename(path_new)} is done.")
print(f"{'='*60}\n\n[gpwatch/o_o] Watching new data for {obs} with {ncore} cores \n\n{'='*60}")
break
except Exception as e:
print(e)
#------------------------------------------------------------
# Slack message
#------------------------------------------------------------
channel = '#pipeline'
text = f'[gpwatch/{obs}] Error\n{e}'
param_slack = dict(
token = OAuth_Token,
channel = channel,
text = text,
)
slack_bot(**param_slack)
		time.sleep(1)
| 27.6 | 108 | 0.509058 |
import os
import sys
import time
sys.path.append('/home/paek/imsngpy')
from misc import *
from astropy.io import ascii
def get_size(start_path = '.'):
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
if not os.path.islink(fp):
total_size += os.path.getsize(fp)
return total_size
path_obsdata = '/data6/obsdata'
path_table = '/home/paek/table'
path_log = '/home/paek/log'
path_gppy = '/home/paek/imsngpy/imsngpy'
path_check_rasa36 = '/home/paek/qsopy/monitor/classify_rasa36.py'
path_preprocess = f'{path_gppy}/pipeline.processing.py'
keytbl = ascii.read(f'{path_table}/keys.dat')
OAuth_Token = keytbl['key'][keytbl['name']=='slack'].item()
obsdict = dict(
loao=dict(
path_base='/data6/obsdata/LOAO',
path_new='',
log=f'{path_log}/loao.log',
size=0, core=1, ),
doao=dict(
path_base='/data6/obsdata/DOAO',
path_new='',
log=f'{path_log}/doao.log',
size=0, core=1, ),
soao=dict(
path_base='/data6/obsdata/SOAO',
path_new='',
log=f'{path_log}/soao.log',
size=0, core=1, ),
cbnuo=dict(
path_base='/data6/obsdata/CBNUO', path_new='',
log=f'{path_log}/cbnuo.log',
size=0, core=1, ),
khao=dict(
path_base='/data6/obsdata/KHAO', path_new='',
log=f'{path_log}/khao.log',
size=0, core=2, ),
mdfts=dict(
path_base='/data6/obsdata/MDFTS', path_new='',
log=f'{path_log}/mdfts.log',
size=0, core=2, ),
kct_stx16803=dict(
path_base='/data6/obsdata/KCT_STX16803',
path_new='',
log=f'{path_log}/kct_stx16803.log',
size=0, core=1, ),
rasa36=dict(
path_base='/data6/obsdata/RASA36',
path_new='',
log=f'{path_log}/rasa36.log',
size=0, core=1, ),
)
obslist = ['LOAO', 'DOAO', 'SOAO', 'CBNUO', 'KHAO', 'KCT_STX16803', 'RASA36']
print('OBSERVATORY LIST: ', end='')
print(obslist)
obs = input('obs:').upper()
delay = 10
ncore = input('# of cores (e.g. 8):')
print(f"{'='*60}\n\n[gpwatch/o_o] Watching new data for {obs} with {ncore} cores \n\n{'='*60}")
st = time.time()
while True:
try:
et = time.time()
delt = int(et - st)
h = delt // (60*60)
		m = (delt // 60) % 60
s = delt % 60
timer = '{:02d}:{:02d}:{:02d}'.format(h, m, s)
print(timer, end="\r")
		log = obsdict[obs.lower()]['log']
path_base = f"{path_obsdata}/{obs}"
logtbl = ascii.read(log)
dirlist = os.listdir(path_base)
for f in dirlist:
path_new = f"{path_base}/{f}"
if (path_new not in logtbl['date']) & (f"{path_new}/" not in logtbl['date']) & (os.path.isdir(path_new)):
print()
channel = '#pipeline'
text = f'[gpwatch/{obs}] Detected New {os.path.basename(path_new)} Data'
param_slack = dict(
token = OAuth_Token,
channel = channel,
text = text,
)
slack_bot(**param_slack)
				print(text)
init_size = get_size(path_new)
while True:
time.sleep(int(delay*2))
now_size = get_size(path_new)
if init_size != now_size:
print(f'Still uploading {os.path.basename(path_new)} : {init_size} --> {now_size}')
init_size = now_size
else:
						if (obs.lower() == 'rasa36'):
com = f'python {path_check_rasa36} {path_new}'
print(com)
os.system(com)
if len(dirlist) == len(os.listdir(path_base)):
com = f"python {path_calib} {obs} {ncore}"
print(com)
os.system(com)
else:
break
else:
com = f"python {path_calib} {obs} {ncore}"
print(com)
os.system(com)
print(f"[gpwatch/{obs}] Process for {os.path.basename(path_new)} is done.")
print(f"{'='*60}\n\n[gpwatch/o_o] Watching new data for {obs} with {ncore} cores \n\n{'='*60}")
break
except Exception as e:
print(e)
channel = '#pipeline'
text = f'[gpwatch/{obs}] Error\n{e}'
param_slack = dict(
token = OAuth_Token,
channel = channel,
text = text,
)
slack_bot(**param_slack)
time.sleep(1) | true | true |
1c4a210a93526f7ae8c560a788759b5ed53c5f6f | 21,306 | py | Python | src/briefcase/commands/base.py | chuckyQ/briefcase | 06e84e7b1c3af016c828a5a640d277809de6644b | [
"BSD-3-Clause"
] | 3 | 2020-10-20T00:59:22.000Z | 2021-02-22T09:22:00.000Z | src/briefcase/commands/base.py | chuckyQ/briefcase | 06e84e7b1c3af016c828a5a640d277809de6644b | [
"BSD-3-Clause"
] | null | null | null | src/briefcase/commands/base.py | chuckyQ/briefcase | 06e84e7b1c3af016c828a5a640d277809de6644b | [
"BSD-3-Clause"
] | null | null | null |
import argparse
import importlib
import inspect
import os
import platform
import shutil
import sys
from abc import ABC, abstractmethod
from cgi import parse_header
from pathlib import Path
from urllib.parse import urlparse
import requests
import toml
from cookiecutter.main import cookiecutter
from cookiecutter.repository import is_repo_url
from briefcase import __version__, integrations
from briefcase.config import AppConfig, BaseConfig, GlobalConfig, parse_config
from briefcase.console import Console
from briefcase.exceptions import (
BadNetworkResourceError,
BriefcaseCommandError,
BriefcaseConfigError,
MissingNetworkResourceError
)
from briefcase.integrations.subprocess import Subprocess
class TemplateUnsupportedVersion(BriefcaseCommandError):
def __init__(self, version_tag):
self.version_tag = version_tag
super().__init__(
msg='Template does not support {version_tag}'.format(
version_tag=version_tag
)
)
def create_config(klass, config, msg):
try:
return klass(**config)
except TypeError:
        # Inspect the constructor of ``klass`` to find which
# parameters are required and don't have a default
# value.
required_args = {
name
for name, param in inspect.signature(klass.__init__).parameters.items()
if param.default == inspect._empty
and name not in {'self', 'kwargs'}
}
missing_args = required_args - config.keys()
missing = ', '.join(
"'{arg}'".format(arg=arg)
for arg in sorted(missing_args)
)
raise BriefcaseConfigError(
"{msg} is incomplete (missing {missing})".format(
msg=msg,
missing=missing
)
)
def cookiecutter_cache_path(template):
"""
Determine the cookiecutter template cache directory given a template URL.
    This will return a valid path, regardless of whether a cached copy of `template` actually exists yet.
:param template: The template to use. This can be a filesystem path or
a URL.
:returns: The path that cookiecutter would use for the given template name.
"""
template = template.rstrip('/')
tail = template.split('/')[-1]
cache_name = tail.rsplit('.git')[0]
return Path.home() / '.cookiecutters' / cache_name
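# For example (assuming a typical GitHub template URL):
#   cookiecutter_cache_path('https://github.com/beeware/briefcase-template.git')
#   returns Path.home() / '.cookiecutters' / 'briefcase-template'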
def full_options(state, options):
"""
Merge command state with keyword arguments.
Command state takes precedence over any keyword argument.
:param state: The current command state. Can be ``None``.
:param options: The base options.
:returns: A dictionary containing all of ``options``, with any values
provided in ``state`` overriding the base ``options`` values.
"""
if state is not None:
full = options.copy()
full.update(state)
else:
full = options
return full
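# For example: full_options({'a': 2}, {'a': 1, 'b': 3}) returns
# {'a': 2, 'b': 3} -- the value from ``state`` wins.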
class BaseCommand(ABC):
cmd_line = "briefcase {command} {platform} {output_format}"
GLOBAL_CONFIG_CLASS = GlobalConfig
APP_CONFIG_CLASS = AppConfig
def __init__(self, base_path, home_path=Path.home(), apps=None, input_enabled=True):
self.base_path = base_path
self.home_path = home_path
self.dot_briefcase_path = home_path / ".briefcase"
self.tools_path = self.dot_briefcase_path / 'tools'
self.global_config = None
self.apps = {} if apps is None else apps
self._path_index = {}
# Some details about the host machine
self.host_arch = platform.machine()
self.host_os = platform.system()
# External service APIs.
# These are abstracted to enable testing without patching.
self.cookiecutter = cookiecutter
self.requests = requests
self.input = Console(enabled=input_enabled)
self.os = os
self.sys = sys
self.shutil = shutil
self.subprocess = Subprocess(self)
# The internal Briefcase integrations API.
self.integrations = integrations
@property
def create_command(self):
"Factory property; return an instance of a create command for the same format"
format_module = importlib.import_module(self.__module__)
command = format_module.create(
base_path=self.base_path,
apps=self.apps,
input_enabled=self.input.enabled,
)
command.clone_options(self)
return command
@property
def update_command(self):
"Factory property; return an instance of an update command for the same format"
format_module = importlib.import_module(self.__module__)
command = format_module.update(
base_path=self.base_path,
apps=self.apps,
input_enabled=self.input.enabled,
)
command.clone_options(self)
return command
@property
def build_command(self):
"Factory property; return an instance of a build command for the same format"
format_module = importlib.import_module(self.__module__)
command = format_module.build(
base_path=self.base_path,
apps=self.apps,
input_enabled=self.input.enabled,
)
command.clone_options(self)
return command
@property
def run_command(self):
"Factory property; return an instance of a run command for the same format"
format_module = importlib.import_module(self.__module__)
command = format_module.run(
base_path=self.base_path,
apps=self.apps,
input_enabled=self.input.enabled,
)
command.clone_options(self)
return command
@property
def package_command(self):
"Factory property; return an instance of a package command for the same format"
format_module = importlib.import_module(self.__module__)
command = format_module.package(
base_path=self.base_path,
apps=self.apps,
input_enabled=self.input.enabled,
)
command.clone_options(self)
return command
@property
def publish_command(self):
"Factory property; return an instance of a publish command for the same format"
format_module = importlib.import_module(self.__module__)
command = format_module.publish(
base_path=self.base_path,
apps=self.apps,
input_enabled=self.input.enabled,
)
command.clone_options(self)
return command
@property
def platform_path(self):
"""
The path for all applications for this command's platform
"""
return self.base_path / self.platform
def bundle_path(self, app):
"""
The path to the bundle for the app in the output format.
The bundle is the template-generated source form of the app.
The path will usually be a directory, the existence of which is
indicative that the template has been rolled out for an app.
:param app: The app config
"""
return self.platform_path / app.formal_name
@abstractmethod
def binary_path(self, app):
"""
The path to the executable artefact for the app in the output format.
This may be a binary file produced by compilation; however, if
the output format doesn't require compilation, it may be the same
as the bundle path (assuming the bundle path is inherently
"executable"), or a path that reasonably represents the thing that can
be executed.
:param app: The app config
"""
...
@abstractmethod
def distribution_path(self, app):
"""
The path to the distributable artefact for the app in the output format.
This is the single file that should be uploaded for distribution.
This may be the binary (if the binary is a self contained executable);
however, if the output format produces an installer, it will be the
path to the installer.
:param app: The app config
"""
...
def _load_path_index(self, app: BaseConfig):
"""
Load the path index from the index file provided by the app template
:param app: The config object for the app
:return: The contents of the application path index.
"""
with (self.bundle_path(app) / 'briefcase.toml').open() as f:
self._path_index[app] = toml.load(f)['paths']
return self._path_index[app]
def support_path(self, app: BaseConfig):
"""
Obtain the path into which the support package should be unpacked
:param app: The config object for the app
:return: The full path where the support package should be unpacked.
"""
# If the index file hasn't been loaded for this app, load it.
try:
path_index = self._path_index[app]
except KeyError:
path_index = self._load_path_index(app)
return self.bundle_path(app) / path_index['support_path']
def app_packages_path(self, app: BaseConfig):
"""
Obtain the path into which dependencies should be installed
:param app: The config object for the app
:return: The full path where application dependencies should be installed.
"""
# If the index file hasn't been loaded for this app, load it.
try:
path_index = self._path_index[app]
except KeyError:
path_index = self._load_path_index(app)
return self.bundle_path(app) / path_index['app_packages_path']
def app_path(self, app: BaseConfig):
"""
Obtain the path into which the application should be installed.
:param app: The config object for the app
:return: The full path where application code should be installed.
"""
# If the index file hasn't been loaded for this app, load it.
try:
path_index = self._path_index[app]
except KeyError:
path_index = self._load_path_index(app)
return self.bundle_path(app) / path_index['app_path']
def app_module_path(self, app):
"""
Find the path for the application module for an app.
:param app: The config object for the app
:returns: The Path to the dist-info folder.
"""
app_home = [
path.split('/')
for path in app.sources
if path.rsplit('/', 1)[-1] == app.module_name
]
try:
if len(app_home) == 1:
path = Path(str(self.base_path), *app_home[0])
else:
raise BriefcaseCommandError(
"Multiple paths in sources found for application '{app.app_name}'".format(app=app)
)
except IndexError:
raise BriefcaseCommandError(
"Unable to find code for application '{app.app_name}'".format(app=app)
)
return path
@property
def python_version_tag(self):
"""
The major.minor of the Python version in use, as a string.
This is used as a repository label/tag to identify the appropriate
templates, etc to use.
"""
return '{major}.{minor}'.format(
major=self.sys.version_info.major,
minor=self.sys.version_info.minor
)
def verify_tools(self):
"""
Verify that the tools needed to run this command exist
Raises MissingToolException if a required system tool is missing.
"""
pass
def parse_options(self, extra):
parser = argparse.ArgumentParser(
prog=self.cmd_line.format(
command=self.command,
platform=self.platform,
output_format=self.output_format
),
description=self.description,
)
self.add_default_options(parser)
self.add_options(parser)
# Parse the full set of command line options from the content
# remaining after the basic command/platform/output format
# has been extracted.
options = vars(parser.parse_args(extra))
# Extract the base default options onto the command
self.input.enabled = options.pop('input_enabled')
self.verbosity = options.pop('verbosity')
return options
def clone_options(self, command):
"""
Clone options from one command to this one.
:param command: The command whose options are to be cloned
"""
self.input.enabled = command.input.enabled
self.verbosity = command.verbosity
def add_default_options(self, parser):
"""
Add the default options that exist on *all* commands
:param parser: a stub argparse parser for the command.
"""
parser.add_argument(
'-v', '--verbosity',
action='count',
default=1,
help="set the verbosity of output"
)
parser.add_argument(
'-V', '--version',
action='version',
version=__version__
)
parser.add_argument(
'--no-input',
action='store_false',
default=True,
dest="input_enabled",
help="Don't ask for user input. If any action would be destructive, "
"an error will be raised; otherwise, default answers will be "
"assumed."
)
def add_options(self, parser):
"""
Add any options that this command needs to parse from the command line.
:param parser: a stub argparse parser for the command.
"""
pass
def parse_config(self, filename):
try:
with open(filename) as config_file:
# Parse the content of the pyproject.toml file, extracting
# any platform and output format configuration for each app,
# creating a single set of configuration options.
global_config, app_configs = parse_config(
config_file,
platform=self.platform,
output_format=self.output_format
)
self.global_config = create_config(
klass=self.GLOBAL_CONFIG_CLASS,
config=global_config,
msg="Global configuration"
)
for app_name, app_config in app_configs.items():
# Construct an AppConfig object with the final set of
# configuration options for the app.
self.apps[app_name] = create_config(
klass=self.APP_CONFIG_CLASS,
config=app_config,
msg="Configuration for '{app_name}'".format(
app_name=app_name
)
)
except FileNotFoundError:
raise BriefcaseConfigError('configuration file not found')
def download_url(self, url, download_path):
"""
Download a given URL, caching it. If it has already been downloaded,
return the value that has been cached.
This is a utility method used to obtain assets used by the
install process. The cached filename will be the filename portion of
the URL, appended to the download path.
:param url: The URL to download
:param download_path: The path to the download cache folder. This path
will be created if it doesn't exist.
:returns: The filename of the downloaded (or cached) file.
"""
download_path.mkdir(parents=True, exist_ok=True)
response = self.requests.get(url, stream=True)
if response.status_code == 404:
raise MissingNetworkResourceError(
url=url,
)
elif response.status_code != 200:
raise BadNetworkResourceError(
url=url,
status_code=response.status_code
)
# The initial URL might (read: will) go through URL redirects, so
# we need the *final* response. We look at either the `Content-Disposition`
# header, or the final URL, to extract the cache filename.
cache_full_name = urlparse(response.url).path
header_value = response.headers.get('Content-Disposition')
if header_value:
# See also https://tools.ietf.org/html/rfc6266
value, parameters = parse_header(header_value)
if (value.split(':', 1)[-1].strip().lower() == 'attachment' and parameters.get('filename')):
cache_full_name = parameters['filename']
cache_name = cache_full_name.split('/')[-1]
filename = download_path / cache_name
if not filename.exists():
# We have meaningful content, and it hasn't been cached previously,
# so save it in the requested location
print('Downloading {cache_name}...'.format(cache_name=cache_name))
with filename.open('wb') as f:
total = response.headers.get('content-length')
if total is None:
f.write(response.content)
else:
downloaded = 0
total = int(total)
for data in response.iter_content(chunk_size=1024 * 1024):
downloaded += len(data)
f.write(data)
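                        # Render a 50-character progress bar; ``done`` columns
                        # are filled, and ``2 * done`` is the percentage shown.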
done = int(50 * downloaded / total)
print('\r{}{} {}%'.format('#' * done, '.' * (50-done), 2*done), end='', flush=True)
print()
else:
print('{cache_name} already downloaded'.format(cache_name=cache_name))
return filename
def update_cookiecutter_cache(self, template: str, branch='master'):
"""
Ensure that we have a current checkout of a template path.
If the path is a local path, use the path as is.
If the path is a URL, look for a local cache; if one exists, update it,
including checking out the required branch.
:param template: The template URL or path.
:param branch: The template branch to use. Default: ``master``
:return: The path to the cached template. This may be the originally
provided path if the template was a file path.
"""
if is_repo_url(template):
# The app template is a repository URL.
#
# When in `no_input=True` mode, cookiecutter deletes and reclones
# a template directory, rather than updating the existing repo.
#
# Look for a cookiecutter cache of the template; if one exists,
# try to update it using git. If no cache exists, or if the cache
# directory isn't a git directory, or git fails for some reason,
# fall back to using the specified template directly.
try:
cached_template = cookiecutter_cache_path(template)
repo = self.git.Repo(cached_template)
try:
# Attempt to update the repository
remote = repo.remote(name='origin')
remote.fetch()
except self.git.exc.GitCommandError:
# We are offline, or otherwise unable to contact
# the origin git repo. It's OK to continue; but warn
# the user that the template may be stale.
print("***************************************************************************")
print("WARNING: Unable to update template (is your computer offline?)")
print("WARNING: Briefcase will use existing template without updating.")
print("***************************************************************************")
try:
# Check out the branch for the required version tag.
head = remote.refs[branch]
print("Using existing template (sha {hexsha}, updated {datestamp})".format(
hexsha=head.commit.hexsha,
datestamp=head.commit.committed_datetime.strftime("%c")
))
head.checkout()
except IndexError:
# No branch exists for the requested version.
raise TemplateUnsupportedVersion(branch)
except self.git.exc.NoSuchPathError:
# Template cache path doesn't exist.
# Just use the template directly, rather than attempting an update.
cached_template = template
except self.git.exc.InvalidGitRepositoryError:
# Template cache path exists, but isn't a git repository
# Just use the template directly, rather than attempting an update.
cached_template = template
else:
# If this isn't a repository URL, treat it as a local directory
cached_template = template
return cached_template
| 36.797927 | 107 | 0.597766 |
import argparse
import importlib
import inspect
import os
import platform
import shutil
import sys
from abc import ABC, abstractmethod
from cgi import parse_header
from pathlib import Path
from urllib.parse import urlparse
import requests
import toml
from cookiecutter.main import cookiecutter
from cookiecutter.repository import is_repo_url
from briefcase import __version__, integrations
from briefcase.config import AppConfig, BaseConfig, GlobalConfig, parse_config
from briefcase.console import Console
from briefcase.exceptions import (
BadNetworkResourceError,
BriefcaseCommandError,
BriefcaseConfigError,
MissingNetworkResourceError
)
from briefcase.integrations.subprocess import Subprocess
class TemplateUnsupportedVersion(BriefcaseCommandError):
def __init__(self, version_tag):
self.version_tag = version_tag
super().__init__(
msg='Template does not support {version_tag}'.format(
version_tag=version_tag
)
)
def create_config(klass, config, msg):
try:
return klass(**config)
except TypeError:
required_args = {
name
for name, param in inspect.signature(klass.__init__).parameters.items()
if param.default == inspect._empty
and name not in {'self', 'kwargs'}
}
missing_args = required_args - config.keys()
missing = ', '.join(
"'{arg}'".format(arg=arg)
for arg in sorted(missing_args)
)
raise BriefcaseConfigError(
"{msg} is incomplete (missing {missing})".format(
msg=msg,
missing=missing
)
)
def cookiecutter_cache_path(template):
template = template.rstrip('/')
tail = template.split('/')[-1]
cache_name = tail.rsplit('.git')[0]
return Path.home() / '.cookiecutters' / cache_name
def full_options(state, options):
if state is not None:
full = options.copy()
full.update(state)
else:
full = options
return full
class BaseCommand(ABC):
cmd_line = "briefcase {command} {platform} {output_format}"
GLOBAL_CONFIG_CLASS = GlobalConfig
APP_CONFIG_CLASS = AppConfig
def __init__(self, base_path, home_path=Path.home(), apps=None, input_enabled=True):
self.base_path = base_path
self.home_path = home_path
self.dot_briefcase_path = home_path / ".briefcase"
self.tools_path = self.dot_briefcase_path / 'tools'
self.global_config = None
self.apps = {} if apps is None else apps
self._path_index = {}
# Some details about the host machine
self.host_arch = platform.machine()
self.host_os = platform.system()
# External service APIs.
# These are abstracted to enable testing without patching.
self.cookiecutter = cookiecutter
self.requests = requests
self.input = Console(enabled=input_enabled)
self.os = os
self.sys = sys
self.shutil = shutil
self.subprocess = Subprocess(self)
# The internal Briefcase integrations API.
self.integrations = integrations
@property
def create_command(self):
format_module = importlib.import_module(self.__module__)
command = format_module.create(
base_path=self.base_path,
apps=self.apps,
input_enabled=self.input.enabled,
)
command.clone_options(self)
return command
@property
def update_command(self):
format_module = importlib.import_module(self.__module__)
command = format_module.update(
base_path=self.base_path,
apps=self.apps,
input_enabled=self.input.enabled,
)
command.clone_options(self)
return command
@property
def build_command(self):
format_module = importlib.import_module(self.__module__)
command = format_module.build(
base_path=self.base_path,
apps=self.apps,
input_enabled=self.input.enabled,
)
command.clone_options(self)
return command
@property
def run_command(self):
format_module = importlib.import_module(self.__module__)
command = format_module.run(
base_path=self.base_path,
apps=self.apps,
input_enabled=self.input.enabled,
)
command.clone_options(self)
return command
@property
def package_command(self):
format_module = importlib.import_module(self.__module__)
command = format_module.package(
base_path=self.base_path,
apps=self.apps,
input_enabled=self.input.enabled,
)
command.clone_options(self)
return command
@property
def publish_command(self):
format_module = importlib.import_module(self.__module__)
command = format_module.publish(
base_path=self.base_path,
apps=self.apps,
input_enabled=self.input.enabled,
)
command.clone_options(self)
return command
@property
def platform_path(self):
return self.base_path / self.platform
def bundle_path(self, app):
return self.platform_path / app.formal_name
@abstractmethod
def binary_path(self, app):
...
@abstractmethod
def distribution_path(self, app):
...
def _load_path_index(self, app: BaseConfig):
with (self.bundle_path(app) / 'briefcase.toml').open() as f:
self._path_index[app] = toml.load(f)['paths']
return self._path_index[app]
def support_path(self, app: BaseConfig):
# If the index file hasn't been loaded for this app, load it.
try:
path_index = self._path_index[app]
except KeyError:
path_index = self._load_path_index(app)
return self.bundle_path(app) / path_index['support_path']
def app_packages_path(self, app: BaseConfig):
try:
path_index = self._path_index[app]
except KeyError:
path_index = self._load_path_index(app)
return self.bundle_path(app) / path_index['app_packages_path']
def app_path(self, app: BaseConfig):
# If the index file hasn't been loaded for this app, load it.
try:
path_index = self._path_index[app]
except KeyError:
path_index = self._load_path_index(app)
return self.bundle_path(app) / path_index['app_path']
def app_module_path(self, app):
app_home = [
path.split('/')
for path in app.sources
if path.rsplit('/', 1)[-1] == app.module_name
]
try:
if len(app_home) == 1:
path = Path(str(self.base_path), *app_home[0])
else:
raise BriefcaseCommandError(
"Multiple paths in sources found for application '{app.app_name}'".format(app=app)
)
except IndexError:
raise BriefcaseCommandError(
"Unable to find code for application '{app.app_name}'".format(app=app)
)
return path
@property
def python_version_tag(self):
return '{major}.{minor}'.format(
major=self.sys.version_info.major,
minor=self.sys.version_info.minor
)
def verify_tools(self):
pass
def parse_options(self, extra):
parser = argparse.ArgumentParser(
prog=self.cmd_line.format(
command=self.command,
platform=self.platform,
output_format=self.output_format
),
description=self.description,
)
self.add_default_options(parser)
self.add_options(parser)
options = vars(parser.parse_args(extra))
self.input.enabled = options.pop('input_enabled')
self.verbosity = options.pop('verbosity')
return options
def clone_options(self, command):
self.input.enabled = command.input.enabled
self.verbosity = command.verbosity
def add_default_options(self, parser):
parser.add_argument(
'-v', '--verbosity',
action='count',
default=1,
help="set the verbosity of output"
)
parser.add_argument(
'-V', '--version',
action='version',
version=__version__
)
parser.add_argument(
'--no-input',
action='store_false',
default=True,
dest="input_enabled",
help="Don't ask for user input. If any action would be destructive, "
"an error will be raised; otherwise, default answers will be "
"assumed."
)
def add_options(self, parser):
pass
def parse_config(self, filename):
try:
with open(filename) as config_file:
# Parse the content of the pyproject.toml file, extracting
# any platform and output format configuration for each app,
# creating a single set of configuration options.
global_config, app_configs = parse_config(
config_file,
platform=self.platform,
output_format=self.output_format
)
self.global_config = create_config(
klass=self.GLOBAL_CONFIG_CLASS,
config=global_config,
msg="Global configuration"
)
for app_name, app_config in app_configs.items():
# Construct an AppConfig object with the final set of
# configuration options for the app.
self.apps[app_name] = create_config(
klass=self.APP_CONFIG_CLASS,
config=app_config,
msg="Configuration for '{app_name}'".format(
app_name=app_name
)
)
except FileNotFoundError:
raise BriefcaseConfigError('configuration file not found')
def download_url(self, url, download_path):
download_path.mkdir(parents=True, exist_ok=True)
response = self.requests.get(url, stream=True)
if response.status_code == 404:
raise MissingNetworkResourceError(
url=url,
)
elif response.status_code != 200:
raise BadNetworkResourceError(
url=url,
status_code=response.status_code
)
# The initial URL might (read: will) go through URL redirects, so
# we need the *final* response. We look at either the `Content-Disposition`
# header, or the final URL, to extract the cache filename.
cache_full_name = urlparse(response.url).path
header_value = response.headers.get('Content-Disposition')
if header_value:
# See also https://tools.ietf.org/html/rfc6266
value, parameters = parse_header(header_value)
if (value.split(':', 1)[-1].strip().lower() == 'attachment' and parameters.get('filename')):
cache_full_name = parameters['filename']
cache_name = cache_full_name.split('/')[-1]
filename = download_path / cache_name
if not filename.exists():
# We have meaningful content, and it hasn't been cached previously,
print('Downloading {cache_name}...'.format(cache_name=cache_name))
with filename.open('wb') as f:
total = response.headers.get('content-length')
if total is None:
f.write(response.content)
else:
downloaded = 0
total = int(total)
for data in response.iter_content(chunk_size=1024 * 1024):
downloaded += len(data)
f.write(data)
done = int(50 * downloaded / total)
print('\r{}{} {}%'.format('#' * done, '.' * (50-done), 2*done), end='', flush=True)
print()
else:
print('{cache_name} already downloaded'.format(cache_name=cache_name))
return filename
def update_cookiecutter_cache(self, template: str, branch='master'):
if is_repo_url(template):
            # If anything goes wrong, fall back to using the specified template directly.
try:
cached_template = cookiecutter_cache_path(template)
repo = self.git.Repo(cached_template)
try:
# Attempt to update the repository
remote = repo.remote(name='origin')
remote.fetch()
except self.git.exc.GitCommandError:
# We are offline, or otherwise unable to contact
                # the origin git repo. It's OK to continue; but warn
                # the user that the template may be stale.
print("***************************************************************************")
print("WARNING: Unable to update template (is your computer offline?)")
print("WARNING: Briefcase will use existing template without updating.")
print("***************************************************************************")
try:
head = remote.refs[branch]
print("Using existing template (sha {hexsha}, updated {datestamp})".format(
hexsha=head.commit.hexsha,
datestamp=head.commit.committed_datetime.strftime("%c")
))
head.checkout()
except IndexError:
raise TemplateUnsupportedVersion(branch)
except self.git.exc.NoSuchPathError:
# Just use the template directly, rather than attempting an update.
cached_template = template
except self.git.exc.InvalidGitRepositoryError:
# Template cache path exists, but isn't a git repository
cached_template = template
else:
cached_template = template
return cached_template
| true | true |
1c4a21aabd3ae6db6ff20f418f668c968e995202 | 1,966 | py | Python | utils/setup.py | david8862/keras-CenterNet | e74b933f6dd5ffac04f2de3eb0d887742be8490f | [
"Apache-2.0"
] | null | null | null | utils/setup.py | david8862/keras-CenterNet | e74b933f6dd5ffac04f2de3eb0d887742be8490f | [
"Apache-2.0"
] | null | null | null | utils/setup.py | david8862/keras-CenterNet | e74b933f6dd5ffac04f2de3eb0d887742be8490f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import setuptools
from setuptools.extension import Extension
from distutils.command.build_ext import build_ext as DistUtilsBuildExt
class BuildExtension(setuptools.Command):
description = DistUtilsBuildExt.description
user_options = DistUtilsBuildExt.user_options
boolean_options = DistUtilsBuildExt.boolean_options
help_options = DistUtilsBuildExt.help_options
def __init__(self, *args, **kwargs):
from setuptools.command.build_ext import build_ext as SetupToolsBuildExt
# Bypass __setatrr__ to avoid infinite recursion.
self.__dict__['_command'] = SetupToolsBuildExt(*args, **kwargs)
def __getattr__(self, name):
return getattr(self._command, name)
def __setattr__(self, name, value):
setattr(self._command, name, value)
def initialize_options(self, *args, **kwargs):
return self._command.initialize_options(*args, **kwargs)
def finalize_options(self, *args, **kwargs):
ret = self._command.finalize_options(*args, **kwargs)
import numpy
self.include_dirs.append(numpy.get_include())
return ret
def run(self, *args, **kwargs):
return self._command.run(*args, **kwargs)
extensions = [
Extension(
'compute_overlap',
['compute_overlap.pyx']
),
]
setuptools.setup(
name = 'keras-CenterNet',
version = '0.0.1',
description = 'Keras implementation of CenterNet object detection.',
url = 'https://github.com/david8862/keras-CenterNet',
author = 'david8862',
author_email = '[email protected]',
maintainer = 'david8862',
maintainer_email = '[email protected]',
cmdclass = {'build_ext': BuildExtension},
packages = setuptools.find_packages(),
ext_modules = extensions,
setup_requires = ["cython>=0.28", "numpy>=1.14.0"]
)
| 32.229508 | 80 | 0.660224 | import setuptools
from setuptools.extension import Extension
from distutils.command.build_ext import build_ext as DistUtilsBuildExt
class BuildExtension(setuptools.Command):
description = DistUtilsBuildExt.description
user_options = DistUtilsBuildExt.user_options
boolean_options = DistUtilsBuildExt.boolean_options
help_options = DistUtilsBuildExt.help_options
def __init__(self, *args, **kwargs):
from setuptools.command.build_ext import build_ext as SetupToolsBuildExt
self.__dict__['_command'] = SetupToolsBuildExt(*args, **kwargs)
def __getattr__(self, name):
return getattr(self._command, name)
def __setattr__(self, name, value):
setattr(self._command, name, value)
def initialize_options(self, *args, **kwargs):
return self._command.initialize_options(*args, **kwargs)
def finalize_options(self, *args, **kwargs):
ret = self._command.finalize_options(*args, **kwargs)
import numpy
self.include_dirs.append(numpy.get_include())
return ret
def run(self, *args, **kwargs):
return self._command.run(*args, **kwargs)
extensions = [
Extension(
'compute_overlap',
['compute_overlap.pyx']
),
]
setuptools.setup(
name = 'keras-CenterNet',
version = '0.0.1',
description = 'Keras implementation of CenterNet object detection.',
url = 'https://github.com/david8862/keras-CenterNet',
author = 'david8862',
author_email = '[email protected]',
maintainer = 'david8862',
maintainer_email = '[email protected]',
cmdclass = {'build_ext': BuildExtension},
packages = setuptools.find_packages(),
ext_modules = extensions,
setup_requires = ["cython>=0.28", "numpy>=1.14.0"]
)
| true | true |
1c4a21e7724b1a0546fadafe66916377d6168f66 | 6,287 | py | Python | engine/trainer/train.py | 7eta/udk_labeler | 8cd8a86bc1a78647c0aaf81ca78e6e518fb86ceb | [
"Apache-2.0"
] | 2 | 2021-03-08T02:29:09.000Z | 2021-03-08T02:29:11.000Z | engine/trainer/train.py | 7eta/udk_labeler | 8cd8a86bc1a78647c0aaf81ca78e6e518fb86ceb | [
"Apache-2.0"
] | null | null | null | engine/trainer/train.py | 7eta/udk_labeler | 8cd8a86bc1a78647c0aaf81ca78e6e518fb86ceb | [
"Apache-2.0"
] | null | null | null | import os
import argparse
import numpy as np
import torch
import torch.optim as optim
from engine.dataloader import get_dataloader
from engine.retinanet import model
from engine.retinanet import coco_eval
from engine.log.saver import Saver
from tqdm import tqdm
from collections import deque
from engine.log import logger, summarise
assert torch.__version__.split('.')[0] == '1'
print('CUDA available: {}'.format(torch.cuda.is_available()))
class Trainer(object):
def __init__(self, config, img_dir, coco_json):
self.config = config
# Define Saver
self.saver = Saver(self.config)
# Define Tensorboard
if self.config.tensorboard:
self.summary = summarise.TensorboardSummary(self.saver.directory)
self.writer = self.summary.create_summary()
# Define Logger
self.getlogger = logger.get_logger(self.saver.directory)
self.logger = self.getlogger
# Define DataLoader
self.train_loader, self.n_train_img,\
self.val_set, self.val_loader, self.n_val_img, self.n_classes = get_dataloader(self.config, img_dir, coco_json)
# Define Network
if self.config.depth == 18:
self.retinanet = model.resnet18(num_classes=self.n_classes, pretrained=True)
elif self.config.depth == 34:
self.retinanet = model.resnet34(num_classes=self.n_classes, pretrained=True)
elif self.config.depth == 50:
self.retinanet = model.resnet50(num_classes=self.n_classes, pretrained=True)
elif self.config.depth == 101:
self.retinanet = model.resnet101(num_classes=self.n_classes, pretrained=True)
elif self.config.depth == 152:
self.retinanet = model.resnet152(num_classes=self.n_classes, pretrained=True)
else:
raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')
# Define Optimizer
self.optimizer = optim.Adam(self.retinanet.parameters(), lr=self.config.lr)
        # Define lr_scheduler
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=3, verbose=True)
# Define loss
self.loss_hist = deque(maxlen=500)
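        # Keep only the most recent 500 iteration losses, so the
        # "Running loss" shown in the progress bar is a moving average.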
# Define cuda
if torch.cuda.is_available():
self.retinanet = torch.nn.DataParallel(self.retinanet).cuda()
else:
raise ValueError('=> Cuda is not available. Check cuda')
# Define resume
self.best_f1_score = .0
if self.config.resume is not None:
self.retinanet = torch.load(self.config.resume)
self.retinanet.cuda()
# check model summary
# summary(self.retinanet, (3, 512, 512))
def train(self, epoch):
self.retinanet.train()
self.retinanet.module.freeze_bn()
epoch_loss = []
print(f'Num training images: {self.n_train_img}')
with tqdm(self.train_loader) as tbar:
for iter_num, data in enumerate(tbar):
self.optimizer.zero_grad()
img = data['img'].cuda().float()
annot = data['annot']
cls_loss, reg_loss = self.retinanet([img, annot])
cls_loss = cls_loss.mean()
reg_loss = reg_loss.mean()
loss = cls_loss + reg_loss
epoch_loss.append(float(loss))
self.loss_hist.append(float(loss))
if bool(loss == 0):
continue
loss.backward()
torch.nn.utils.clip_grad_norm_(self.retinanet.parameters(), 0.1)
self.optimizer.step()
if self.config.tensorboard:
self.writer.add_scalar('Train_Loss/classification_loss',
cls_loss,
iter_num + epoch*(len(self.train_loader)))
self.writer.add_scalar('Train_Loss/regression_loss',
reg_loss,
iter_num + epoch*(len(self.train_loader)))
self.writer.add_scalar('Train_Loss/total_loss',
np.mean(self.loss_hist),
iter_num + epoch*(len(self.train_loader)))
tbar.set_description(f'Epoch: {epoch} | '
f'Cls loss: {cls_loss:1.5f} | '
f'Reg loss: {reg_loss:1.5f} | '
f'Running loss: {np.mean(self.loss_hist):1.5f}')
del cls_loss, reg_loss
self.scheduler.step(np.mean(epoch_loss))
def validation(self, epoch):
print('Evaluating dataset')
stats = coco_eval.evaluate_coco(self.val_set, self.retinanet, self.saver.directory)
if stats is None:
return
        # stats holds 12 values (indices 0-11):
# 0: mAP / 1: map .5 / 2: map .75 / 3: ap small / 4: ap medium / 5: ap large/
# 6: ar Det1 / 7: ar Det10 / 8: ar Det100 / 9: ar small / 10: ar medium / 11: ar large
if self.config.tensorboard:
self.writer.add_scalar('Precision/mAP', stats[0], epoch)
self.writer.add_scalar('Precision/mAP@50IOU', stats[1], epoch)
self.writer.add_scalar('Precision/mAP@75IOU', stats[2], epoch)
            self.writer.add_scalar('Precision/mAP(small)', stats[3], epoch)
self.writer.add_scalar('Precision/mAP(medium)', stats[4], epoch)
self.writer.add_scalar('Precision/mAP(large)', stats[5], epoch)
self.writer.add_scalar('Recall/AR@1', stats[6], epoch)
self.writer.add_scalar('Recall/AR@10', stats[7], epoch)
self.writer.add_scalar('Recall/AR@100', stats[8], epoch)
self.writer.add_scalar('Recall/AR@100(small)', stats[9], epoch)
self.writer.add_scalar('Recall/AR@100(medium)', stats[10], epoch)
self.writer.add_scalar('Recall/AR@100(large)', stats[11], epoch)
mAP, AR = stats[0], stats[8]
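        # F1 is the harmonic mean of COCO mAP and AR@100 (note this raises
        # ZeroDivisionError if both are zero).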
f1_score = 2 * (mAP * AR) / (mAP + AR)
if f1_score > self.best_f1_score:
self.best_f1_score = f1_score
self.saver.save_checkpoint(self.retinanet.module, f1_score) | 41.091503 | 119 | 0.585335 | import os
import argparse
import numpy as np
import torch
import torch.optim as optim
from engine.dataloader import get_dataloader
from engine.retinanet import model
from engine.retinanet import coco_eval
from engine.log.saver import Saver
from tqdm import tqdm
from collections import deque
from engine.log import logger, summarise
assert torch.__version__.split('.')[0] == '1'
print('CUDA available: {}'.format(torch.cuda.is_available()))
class Trainer(object):
def __init__(self, config, img_dir, coco_json):
self.config = config
self.saver = Saver(self.config)
if self.config.tensorboard:
self.summary = summarise.TensorboardSummary(self.saver.directory)
self.writer = self.summary.create_summary()
self.getlogger = logger.get_logger(self.saver.directory)
self.logger = self.getlogger
self.train_loader, self.n_train_img,\
self.val_set, self.val_loader, self.n_val_img, self.n_classes = get_dataloader(self.config, img_dir, coco_json)
if self.config.depth == 18:
self.retinanet = model.resnet18(num_classes=self.n_classes, pretrained=True)
elif self.config.depth == 34:
self.retinanet = model.resnet34(num_classes=self.n_classes, pretrained=True)
elif self.config.depth == 50:
self.retinanet = model.resnet50(num_classes=self.n_classes, pretrained=True)
elif self.config.depth == 101:
self.retinanet = model.resnet101(num_classes=self.n_classes, pretrained=True)
elif self.config.depth == 152:
self.retinanet = model.resnet152(num_classes=self.n_classes, pretrained=True)
else:
raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')
self.optimizer = optim.Adam(self.retinanet.parameters(), lr=self.config.lr)
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=3, verbose=True)
self.loss_hist = deque(maxlen=500)
if torch.cuda.is_available():
self.retinanet = torch.nn.DataParallel(self.retinanet).cuda()
else:
raise ValueError('=> Cuda is not available. Check cuda')
self.best_f1_score = .0
if self.config.resume is not None:
self.retinanet = torch.load(self.config.resume)
self.retinanet.cuda()
def train(self, epoch):
self.retinanet.train()
self.retinanet.module.freeze_bn()
epoch_loss = []
print(f'Num training images: {self.n_train_img}')
with tqdm(self.train_loader) as tbar:
for iter_num, data in enumerate(tbar):
self.optimizer.zero_grad()
img = data['img'].cuda().float()
annot = data['annot']
cls_loss, reg_loss = self.retinanet([img, annot])
cls_loss = cls_loss.mean()
reg_loss = reg_loss.mean()
loss = cls_loss + reg_loss
epoch_loss.append(float(loss))
self.loss_hist.append(float(loss))
if bool(loss == 0):
continue
loss.backward()
torch.nn.utils.clip_grad_norm_(self.retinanet.parameters(), 0.1)
self.optimizer.step()
if self.config.tensorboard:
self.writer.add_scalar('Train_Loss/classification_loss',
cls_loss,
iter_num + epoch*(len(self.train_loader)))
self.writer.add_scalar('Train_Loss/regression_loss',
reg_loss,
iter_num + epoch*(len(self.train_loader)))
self.writer.add_scalar('Train_Loss/total_loss',
np.mean(self.loss_hist),
iter_num + epoch*(len(self.train_loader)))
tbar.set_description(f'Epoch: {epoch} | '
f'Cls loss: {cls_loss:1.5f} | '
f'Reg loss: {reg_loss:1.5f} | '
f'Running loss: {np.mean(self.loss_hist):1.5f}')
del cls_loss, reg_loss
self.scheduler.step(np.mean(epoch_loss))
def validation(self, epoch):
print('Evaluating dataset')
stats = coco_eval.evaluate_coco(self.val_set, self.retinanet, self.saver.directory)
if stats is None:
return
if self.config.tensorboard:
self.writer.add_scalar('Precision/mAP', stats[0], epoch)
self.writer.add_scalar('Precision/mAP@50IOU', stats[1], epoch)
self.writer.add_scalar('Precision/mAP@75IOU', stats[2], epoch)
            self.writer.add_scalar('Precision/mAP(small)', stats[3], epoch)
self.writer.add_scalar('Precision/mAP(medium)', stats[4], epoch)
self.writer.add_scalar('Precision/mAP(large)', stats[5], epoch)
self.writer.add_scalar('Recall/AR@1', stats[6], epoch)
self.writer.add_scalar('Recall/AR@10', stats[7], epoch)
self.writer.add_scalar('Recall/AR@100', stats[8], epoch)
self.writer.add_scalar('Recall/AR@100(small)', stats[9], epoch)
self.writer.add_scalar('Recall/AR@100(medium)', stats[10], epoch)
self.writer.add_scalar('Recall/AR@100(large)', stats[11], epoch)
mAP, AR = stats[0], stats[8]
f1_score = 2 * (mAP * AR) / (mAP + AR)
if f1_score > self.best_f1_score:
self.best_f1_score = f1_score
self.saver.save_checkpoint(self.retinanet.module, f1_score) | true | true |
1c4a220909211a50d6280d47c38bfe37ab27a7a4 | 5,687 | py | Python | ipro/moduleModbusTCP.py | pavelk43/ipro_site | 6e5bab5df0ad62f4d78ca96062d02a7be56bc132 | [
"MIT"
] | null | null | null | ipro/moduleModbusTCP.py | pavelk43/ipro_site | 6e5bab5df0ad62f4d78ca96062d02a7be56bc132 | [
"MIT"
] | null | null | null | ipro/moduleModbusTCP.py | pavelk43/ipro_site | 6e5bab5df0ad62f4d78ca96062d02a7be56bc132 | [
"MIT"
] | null | null | null | import time, datetime
import payload
import socket
class moduleModbusTCP():
def __init__(self, ip, port = 502):
self.modbusCommands = { "read holdings" : 3,
"read inputs" : 4,
"write holding" : 6,
"write holdings" : 16 }
self.crc16_table = self.Generate_CRC16_Table()
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.settimeout(1.0)
try:
self.socket.connect((ip, port))
except Exception as e:
print e
else:
print "error"
finally:
print "final"
def __del__(self):
if self.socket != None:
self.socket.shutdown(1)
self.socket.close()
def Hex(self, string):
out = ""
for x in xrange(len(string)):
out += "%02X " % ord(string[x])
return out
def Elapsed(self, started):
return (time.clock() - started)
def RunCommand(self, command, expectedBytes):
totalSent = 0
while totalSent < len(command):
sent = self.socket.send(command[totalSent:])
if sent == 0: # Error
print "RunCommand send returned 0!"
return False, ""
totalSent += sent
# print "Command:", self.Hex(command), "sent", totalSent
start = time.clock()
timedOut = False
timeout = 3.0
out = ""
received = 0
while received < expectedBytes:
chunk = ''
try:
chunk = self.socket.recv(min(expectedBytes - received, 2 * expectedBytes))
except socket.error, e:
print e
return False, ''
except Exception, e:
print e
out += chunk
received += len(chunk)
# print self.Elapsed(start), received, expectedBytes
if self.Elapsed(start) > timeout:
timedOut = True
break
if (timedOut == True) or (len(out) != expectedBytes):
print "*** RunCommand command > ", self.Hex(command)
print " %f elapsed seconds %d bytes returned" % (self.Elapsed(start), len(out))
return False, out
return True, out
def ReadRegistersTCP(self, range, unit, address, count, endian):
if count <= 0 or count > 1000:
return False, ""
p = payload.BinaryPayloadBuilder(endian = endian)
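        # Build the Modbus/TCP MBAP header (transaction id, protocol id 0,
        # remaining byte count) followed by the read-request PDU.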
p.add_16bit_uint(0x0000) # Transaction #
p.add_16bit_uint(0x0000) # 0's
p.add_16bit_uint(1 + 1 + 2 + 2) # Following bytes: unit, function code, 2b address, 2b count
p.add_8bit_uint(unit) # Unit
p.add_8bit_uint(self.modbusCommands[range]) # Function
p.add_16bit_uint(address) # Address
p.add_16bit_uint(count) # Count
str = p.build()
success, result = self.RunCommand(str, 5 + count * 2)
if success != True or len(result) == 0:
# print "ReadHoldingRegisters RunCommand returned false or no data", success
print self.Hex(result)
return False, ""
print self.Hex(result)
d = payload.BinaryPayloadDecoder(result, endian)
unit = d.decode_8bit_uint()
function = d.decode_8bit_uint()
totalBytes = d.decode_8bit_uint()
binary = result[3 : 3 + 2 * count]
return True, binary
def ReadHoldingRegistersRTU(self, unit, address, count, endian):
if count <= 0 or count > 1000:
return False, ""
p = payload.BinaryPayloadBuilder(endian = endian)
p.add_8bit_uint(unit)
p.add_8bit_uint(self.modbusCommands["read holdings"])
p.add_16bit_uint(address)
p.add_16bit_uint(count)
str = p.build()
crc = self.ComputeCRC(str)
p.add_16bit_uint(crc)
success, result = self.RunCommand(p.build(), 5 + count * 2)
if (success != True) or (len(result) == 0):
# print "ReadHoldingRegisters RunCommand returned false or no data", success, self.Hex(result)
return False, ""
if self.CheckCRC(result[:-2], result[-2:]) != True:
print "CRC fail"
return False, ""
d = payload.BinaryPayloadDecoder(result, endian)
unit = d.decode_8bit_uint()
function = d.decode_8bit_uint()
totalBytes = d.decode_8bit_uint()
binary = result[3 : 3 + 2 * count]
# print runit, rcode, rbytes, repr(binary)
return True, binary
def Generate_CRC16_Table(self):
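        # Precompute the 256-entry CRC-16/MODBUS lookup table (reflected
        # polynomial 0xA001) so ComputeCRC needs one table lookup per byte.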
result = []
for byte in range(256):
crc = 0x0000
for _ in range(8):
if (byte ^ crc) & 0x0001:
crc = (crc >> 1) ^ 0xa001
else:
crc >>= 1
byte >>= 1
result.append(crc)
return result
def ComputeCRC(self, data):
crc = 0xffff
for a in data:
idx = self.crc16_table[(crc ^ ord(a)) & 0xff];
crc = ((crc >> 8) & 0xff) ^ idx
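        # Byte-swap so the CRC is transmitted low byte first, as Modbus RTU
        # expects (assuming the payload builder serializes 16-bit values
        # big-endian here).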
swapped = ((crc << 8) & 0xff00) | ((crc >> 8) & 0x00ff)
return swapped
def CheckCRC(self, data, check):
checkInt = 256 * ord(check[0]) + ord(check[1])
# print "Check CRC", self.Hex(data), "Check", self.Hex(check), "Result", "0x%04X" % self.ComputeCRC(data)
return self.ComputeCRC(data) == checkInt
| 33.452941 | 113 | 0.519958 | import time, datetime
import payload
import socket
class moduleModbusTCP():
def __init__(self, ip, port = 502):
self.modbusCommands = { "read holdings" : 3,
"read inputs" : 4,
"write holding" : 6,
"write holdings" : 16 }
self.crc16_table = self.Generate_CRC16_Table()
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.settimeout(1.0)
try:
self.socket.connect((ip, port))
except Exception as e:
print e
else:
print "error"
finally:
print "final"
def __del__(self):
if self.socket != None:
self.socket.shutdown(1)
self.socket.close()
def Hex(self, string):
out = ""
for x in xrange(len(string)):
out += "%02X " % ord(string[x])
return out
def Elapsed(self, started):
return (time.clock() - started)
def RunCommand(self, command, expectedBytes):
totalSent = 0
while totalSent < len(command):
sent = self.socket.send(command[totalSent:])
            if sent == 0:
                print "RunCommand send returned 0!"
return False, ""
totalSent += sent
start = time.clock()
timedOut = False
timeout = 3.0
out = ""
received = 0
while received < expectedBytes:
chunk = ''
try:
chunk = self.socket.recv(min(expectedBytes - received, 2 * expectedBytes))
except socket.error, e:
print e
return False, ''
except Exception, e:
print e
out += chunk
received += len(chunk)
if self.Elapsed(start) > timeout:
timedOut = True
break
if (timedOut == True) or (len(out) != expectedBytes):
print "*** RunCommand command > ", self.Hex(command)
print " %f elapsed seconds %d bytes returned" % (self.Elapsed(start), len(out))
return False, out
return True, out
def ReadRegistersTCP(self, range, unit, address, count, endian):
if count <= 0 or count > 1000:
return False, ""
p = payload.BinaryPayloadBuilder(endian = endian)
        p.add_16bit_uint(0x0000)
        p.add_16bit_uint(0x0000)
        p.add_16bit_uint(1 + 1 + 2 + 2)                     # Following bytes: unit, function code, 2b address, 2b count
p.add_8bit_uint(unit) # Unit
p.add_8bit_uint(self.modbusCommands[range]) # Function
p.add_16bit_uint(address) # Address
p.add_16bit_uint(count) # Count
str = p.build()
success, result = self.RunCommand(str, 5 + count * 2)
if success != True or len(result) == 0:
# print "ReadHoldingRegisters RunCommand returned false or no data", success
print self.Hex(result)
return False, ""
print self.Hex(result)
d = payload.BinaryPayloadDecoder(result, endian)
unit = d.decode_8bit_uint()
function = d.decode_8bit_uint()
totalBytes = d.decode_8bit_uint()
binary = result[3 : 3 + 2 * count]
return True, binary
def ReadHoldingRegistersRTU(self, unit, address, count, endian):
if count <= 0 or count > 1000:
return False, ""
p = payload.BinaryPayloadBuilder(endian = endian)
p.add_8bit_uint(unit)
p.add_8bit_uint(self.modbusCommands["read holdings"])
p.add_16bit_uint(address)
p.add_16bit_uint(count)
str = p.build()
crc = self.ComputeCRC(str)
p.add_16bit_uint(crc)
success, result = self.RunCommand(p.build(), 5 + count * 2)
if (success != True) or (len(result) == 0):
# print "ReadHoldingRegisters RunCommand returned false or no data", success, self.Hex(result)
return False, ""
if self.CheckCRC(result[:-2], result[-2:]) != True:
print "CRC fail"
return False, ""
d = payload.BinaryPayloadDecoder(result, endian)
unit = d.decode_8bit_uint()
function = d.decode_8bit_uint()
totalBytes = d.decode_8bit_uint()
binary = result[3 : 3 + 2 * count]
# print runit, rcode, rbytes, repr(binary)
return True, binary
def Generate_CRC16_Table(self):
result = []
for byte in range(256):
crc = 0x0000
for _ in range(8):
if (byte ^ crc) & 0x0001:
crc = (crc >> 1) ^ 0xa001
else:
crc >>= 1
byte >>= 1
result.append(crc)
return result
def ComputeCRC(self, data):
crc = 0xffff
for a in data:
idx = self.crc16_table[(crc ^ ord(a)) & 0xff];
crc = ((crc >> 8) & 0xff) ^ idx
swapped = ((crc << 8) & 0xff00) | ((crc >> 8) & 0x00ff)
return swapped
def CheckCRC(self, data, check):
checkInt = 256 * ord(check[0]) + ord(check[1])
# print "Check CRC", self.Hex(data), "Check", self.Hex(check), "Result", "0x%04X" % self.ComputeCRC(data)
return self.ComputeCRC(data) == checkInt
| false | true |
1c4a245d2bb97a1c4a9d4c44fe206db1ec8eb500 | 1,617 | py | Python | venv/Lib/site-packages/keystoneclient/tests/unit/v3/test_simple_cert.py | prasoon-uta/IBM-coud-storage | 82a6876316715efbd0b492d0d467dde0ab26a56b | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/keystoneclient/tests/unit/v3/test_simple_cert.py | prasoon-uta/IBM-coud-storage | 82a6876316715efbd0b492d0d467dde0ab26a56b | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/keystoneclient/tests/unit/v3/test_simple_cert.py | prasoon-uta/IBM-coud-storage | 82a6876316715efbd0b492d0d467dde0ab26a56b | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testresources
from keystoneclient.tests.unit import client_fixtures
from keystoneclient.tests.unit.v3 import utils
class SimpleCertTests(utils.ClientTestCase, testresources.ResourcedTestCase):
resources = [('examples', client_fixtures.EXAMPLES_RESOURCE)]
def test_get_ca_certificate(self):
self.stub_url('GET', ['OS-SIMPLE-CERT', 'ca'],
headers={'Content-Type': 'application/x-pem-file'},
text=self.examples.SIGNING_CA)
res = self.client.simple_cert.get_ca_certificates()
self.assertEqual(self.examples.SIGNING_CA, res)
def test_get_certificates(self):
self.stub_url('GET', ['OS-SIMPLE-CERT', 'certificates'],
headers={'Content-Type': 'application/x-pem-file'},
text=self.examples.SIGNING_CERT)
res = self.client.simple_cert.get_certificates()
self.assertEqual(self.examples.SIGNING_CERT, res)
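# ``load_tests`` hands the tests to testresources, which orders them to
# minimise how often shared resources are set up and torn down.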
def load_tests(loader, tests, pattern):
return testresources.OptimisingTestSuite(tests)
| 39.439024 | 77 | 0.713667 |
import testresources
from keystoneclient.tests.unit import client_fixtures
from keystoneclient.tests.unit.v3 import utils
class SimpleCertTests(utils.ClientTestCase, testresources.ResourcedTestCase):
resources = [('examples', client_fixtures.EXAMPLES_RESOURCE)]
def test_get_ca_certificate(self):
self.stub_url('GET', ['OS-SIMPLE-CERT', 'ca'],
headers={'Content-Type': 'application/x-pem-file'},
text=self.examples.SIGNING_CA)
res = self.client.simple_cert.get_ca_certificates()
self.assertEqual(self.examples.SIGNING_CA, res)
def test_get_certificates(self):
self.stub_url('GET', ['OS-SIMPLE-CERT', 'certificates'],
headers={'Content-Type': 'application/x-pem-file'},
text=self.examples.SIGNING_CERT)
res = self.client.simple_cert.get_certificates()
self.assertEqual(self.examples.SIGNING_CERT, res)
def load_tests(loader, tests, pattern):
return testresources.OptimisingTestSuite(tests)
| true | true |
1c4a24828c826c88606ac6a3b6cd095dfec7005a | 15,703 | py | Python | tests/chainer_tests/test_optimizer.py | toshihikoyanase/chainer | 65b34a19d28f60f732c7069163ca23c710a309f4 | [
"MIT"
] | null | null | null | tests/chainer_tests/test_optimizer.py | toshihikoyanase/chainer | 65b34a19d28f60f732c7069163ca23c710a309f4 | [
"MIT"
] | 2 | 2018-01-09T23:05:30.000Z | 2018-01-19T01:19:34.000Z | tests/chainer_tests/test_optimizer.py | bkvogel/chainer | 894cd5d008f11eccdf6e1d7106f5b8bfff9ce005 | [
"MIT"
] | null | null | null | import copy
import unittest
import warnings
import mock
import numpy as np
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import optimizer
from chainer import optimizers
from chainer import testing
from chainer.testing import attr
class TestHyperparameter(unittest.TestCase):
def setUp(self):
self.parent = optimizer.Hyperparameter()
self.parent.x = 1
self.parent.y = 2
self.child = optimizer.Hyperparameter(self.parent)
self.child.y = 3
self.child.z = 4
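        # ``child`` overrides ``y`` and adds ``z``; lookups of ``x`` fall
        # through to ``parent``.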
def test_getattr(self):
self.assertTrue(hasattr(self.parent, 'x'))
self.assertEqual(self.parent.x, 1)
self.assertTrue(hasattr(self.parent, 'y'))
self.assertEqual(self.parent.y, 2)
self.assertFalse(hasattr(self.parent, 'z'))
self.assertTrue(hasattr(self.child, 'x'))
self.assertEqual(self.child.x, 1)
self.assertTrue(hasattr(self.child, 'y'))
self.assertEqual(self.child.y, 3)
self.assertTrue(hasattr(self.child, 'z'))
self.assertEqual(self.child.z, 4)
def test_get_dict(self):
self.assertEqual(self.parent.get_dict(), {'x': 1, 'y': 2})
self.assertEqual(self.child.get_dict(), {'x': 1, 'y': 3, 'z': 4})
def test_repr(self):
self.assertEqual(repr(self.parent), 'Hyperparameter(x=1, y=2)')
self.assertEqual(repr(self.child), 'Hyperparameter(x=1, y=3, z=4)')
def test_deep_copy(self):
parent_copy, child_copy = copy.deepcopy([self.parent, self.child])
self.assertEqual(self.child.get_dict(), child_copy.get_dict())
self.assertEqual(self.parent.get_dict(), parent_copy.get_dict())
self.assertIs(child_copy.parent, parent_copy)
class TestUpdateRule(unittest.TestCase):
def setUp(self):
self.data = np.ones((2, 3), np.float32)
self.grad = np.ones_like(self.data)
self.var = chainer.Variable(self.data, grad=self.grad)
self.update_rule = optimizer.UpdateRule()
self.update_rule.update_core_cpu = mock.MagicMock()
self.update_rule.update_core_gpu = mock.MagicMock()
def test_update_cpu(self):
self.update_rule.update(self.var)
self.assertEqual(self.update_rule.update_core_cpu.call_count, 1)
self.assertEqual(self.update_rule.update_core_gpu.call_count, 0)
@attr.gpu
def test_update_gpu(self):
self.var.to_gpu()
self.update_rule.update(self.var)
self.assertEqual(self.update_rule.update_core_cpu.call_count, 0)
self.assertEqual(self.update_rule.update_core_gpu.call_count, 1)
def check_add_hook(self, hook):
self.update_rule.update(self.var)
self.assertEqual(hook.call_count, 1)
args = hook.call_args_list[0][0]
self.assertEqual(len(args), 2)
self.assertIs(args[0], self.update_rule)
self.assertIs(args[1], self.var)
def test_add_hook(self):
hook = mock.MagicMock()
self.update_rule.add_hook(hook)
self.check_add_hook(hook)
def test_add_hook_with_name(self):
hook = mock.MagicMock()
self.update_rule.add_hook(hook, name='hook')
self.check_add_hook(hook)
def test_remove_hook(self):
hook = mock.MagicMock()
self.update_rule.add_hook(hook, name='hook')
self.update_rule.remove_hook('hook')
self.update_rule.update(self.var)
self.assertEqual(hook.call_count, 0)
def test_add_hook_with_function_name(self):
hook_body = mock.MagicMock()
def foo(update_rule, data, grad):
hook_body(update_rule, data, grad)
self.update_rule.add_hook(foo)
self.update_rule.remove_hook('foo')
self.update_rule.update(self.var)
self.assertEqual(hook_body.call_count, 0)
def test_add_hook_no_name(self):
class CallableWithoutName(object):
def __call__(self, update_rule, param):
pass
with self.assertRaises(ValueError):
self.update_rule.add_hook(CallableWithoutName())
def test_add_hook_duplicated_name(self):
self.update_rule.add_hook(mock.MagicMock(), name='foo')
with self.assertRaises(ValueError):
self.update_rule.add_hook(mock.MagicMock(), name='foo')
def test_remove_hook_not_exist(self):
with self.assertRaises(KeyError):
self.update_rule.remove_hook('foo')
def test_disabled_update_rule(self):
self.update_rule.update_core = mock.MagicMock()
self.update_rule.enabled = False
self.update_rule.update(self.var)
self.assertEqual(self.update_rule.update_core.call_count, 0)
self.update_rule.enabled = True
self.update_rule.update(self.var)
self.assertEqual(self.update_rule.update_core.call_count, 1)
def setup_state(self):
def init_state(data):
state = self.update_rule.state
state['a'] = 0
state['b'] = np.array([1, 2, 3], dtype=np.float32)
self.update_rule.init_state = init_state
@attr.gpu
def test_state_copy_to_gpu(self):
self.setup_state()
def update_core(param):
self.assertIsInstance(self.update_rule.state['a'], int)
self.assertIsInstance(self.update_rule.state['b'], cuda.ndarray)
self.update_rule.update_core = update_core
self.var.to_gpu()
self.update_rule.update(self.var)
@attr.multi_gpu(2)
def test_state_copy_to_another_gpu(self):
self.setup_state()
def update_core(param):
self.assertIsInstance(self.update_rule.state['b'], cuda.ndarray)
self.assertEqual(self.update_rule.state['b'].device.id, 1)
# call update with arrays on GPU 0 (tested by another method)
self.update_rule.update_core = lambda param: None
self.update_rule.update(chainer.Variable(
cuda.to_gpu(self.data, 0), grad=cuda.to_gpu(self.grad, 0)))
# check if it copies the states correctly when arrays on another GPU
# are passed
self.update_rule.update_core = update_core
self.update_rule.update(chainer.Variable(
cuda.to_gpu(self.data, 1), grad=cuda.to_gpu(self.grad, 1)))
@attr.gpu
def test_state_copy_to_cpu(self):
self.setup_state()
def update_core(param):
self.assertIsInstance(self.update_rule.state['a'], int)
self.assertIsInstance(self.update_rule.state['b'], np.ndarray)
self.var.to_gpu()
self.update_rule.update(self.var)
self.var.to_cpu()
self.update_rule.update_core = update_core
self.update_rule.update(self.var)
class TestOptimizer(unittest.TestCase):
def setUp(self):
self.optimizer = optimizer.Optimizer()
def test_new_epoch(self):
self.optimizer.new_epoch()
self.assertEqual(1, self.optimizer.epoch)
def test_invalid_new_epoch(self):
self.optimizer.use_auto_new_epoch = True
with self.assertRaises(RuntimeError):
self.optimizer.new_epoch()
def test_auto_new_epoch(self):
self.optimizer.use_auto_new_epoch = True
self.optimizer.new_epoch(auto=True)
self.assertEqual(1, self.optimizer.epoch)
def test_invalid_auto_new_epoch(self):
with self.assertRaises(RuntimeError):
self.optimizer.new_epoch(auto=True)
class TestOptimizerHook(unittest.TestCase):
def setUp(self):
self.optimizer = optimizer.Optimizer()
self.target = SimpleLink(
np.arange(6, dtype=np.float32).reshape(2, 3),
np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
def test_add_hook(self):
h1 = mock.MagicMock(timing='pre')
h1.call_for_each_param = False
self.optimizer.setup(self.target)
self.optimizer.add_hook(h1, 'h1')
self.optimizer.call_hooks()
h1.assert_called_with(self.optimizer)
def test_add_hook_call_for_each_param(self):
h1 = mock.MagicMock(timing='pre')
h1.call_for_each_param = True
self.optimizer.setup(self.target)
self.optimizer.add_hook(h1, 'h1')
self.optimizer.call_hooks()
h1.assert_called_with(self.target.param.update_rule, self.target.param)
def test_remove_hook(self):
h1 = mock.MagicMock(timing='pre')
self.optimizer.setup(self.target)
self.optimizer.add_hook(h1, 'h1')
self.optimizer.remove_hook('h1')
self.optimizer.call_hooks()
self.assertFalse(h1.called)
def test_duplicated_hook(self):
self.optimizer.setup(self.target)
self.optimizer.add_hook(lambda s: None, 'h1', timing='pre')
with self.assertRaises(KeyError):
self.optimizer.add_hook(lambda s: None, 'h1', timing='pre')
def test_invalid_hook(self):
with self.assertRaises(TypeError):
self.optimizer.add_hook(1)
def test_add_hook_before_setup(self):
with self.assertRaises(RuntimeError):
self.optimizer.add_hook(lambda s: None, 'h1')
class SimpleLink(chainer.Link):
def __init__(self, w, g):
super(SimpleLink, self).__init__()
with self.init_scope():
self.param = chainer.Parameter(w)
self.param.grad = g
class TestGradientMethod(unittest.TestCase):
def setUp(self):
self.optimizer = chainer.GradientMethod()
self.target = chainer.ChainList(
SimpleLink(np.arange(3).astype(np.float32),
np.arange(3).astype(np.float32)),
SimpleLink(np.arange(3).astype(np.float32),
np.arange(3).astype(np.float32)))
self.optimizer.create_update_rule = mock.MagicMock
def setup_cpu(self):
self.optimizer.setup(self.target)
def setup_gpu(self, device=None):
self.target.to_gpu(device)
self.optimizer.setup(self.target)
def test_setup(self):
create_update_rule = mock.MagicMock()
self.optimizer.create_update_rule = create_update_rule
self.optimizer.setup(self.target)
self.assertEqual(create_update_rule.call_count, 2)
self.assertEqual(create_update_rule.call_args_list[0], [(), {}])
self.assertEqual(create_update_rule.call_args_list[1], [(), {}])
def check_update(self):
self.assertEqual(self.optimizer.t, 0)
self.optimizer.update()
self.assertEqual(self.optimizer.t, 1)
self.target[0].param.update_rule.update.assert_called_once_with(
self.target[0].param)
self.target[1].param.update_rule.update.assert_called_once_with(
self.target[1].param)
def test_update_cpu(self):
self.setup_cpu()
self.check_update()
@attr.gpu
def test_update_gpu(self):
self.setup_gpu()
self.check_update()
@testing.parameterize(*testing.product({
'shape': [(4, 3, 2)],
'dtype': [np.float16, np.float32, np.float64],
'loss_scale': [None, 1, 10],
}))
class TestGradientMethodLossScale(unittest.TestCase):
def setUp(self):
param0_data = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
param0_grad = np.copy(param0_data)
param1_data = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
param1_grad = np.copy(param1_data)
self.target = chainer.ChainList(
SimpleLink(param0_data, param0_grad),
SimpleLink(param1_data, param1_grad))
lr = 1.0
if self.loss_scale is not None:
lr = self.loss_scale
for i in range(2):
self.target[i].param._loss_scale = self.loss_scale
self.optimizer = chainer.optimizers.SGD(lr)
def setup_cpu(self):
self.optimizer.setup(self.target)
def setup_gpu(self, device=None):
self.target.to_gpu(device)
self.optimizer.setup(self.target)
def check_update(self):
self.optimizer.update()
xp = backend.get_array_module(self.target[0].param)
expected_data = xp.zeros(self.shape, dtype=self.dtype)
rtol, atol = 1e-4, 1e-5
if self.dtype is np.float16:
rtol, atol = 1e-1, 1e-2
for i in range(2):
testing.assert_allclose(self.target[i].param.data, expected_data,
rtol=rtol, atol=atol)
def test_update_cpu(self):
self.setup_cpu()
self.check_update()
@attr.gpu
def test_update_gpu(self):
self.setup_gpu()
self.check_update()
class TestCleargradHook(unittest.TestCase):
def setUp(self):
self.target = SimpleLink(
np.arange(6, dtype=np.float32).reshape(2, 3),
np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
def check_cleargrad(self):
opt = optimizers.SGD(lr=1)
opt.setup(self.target)
opt.add_hook(CleargradHook(self))
opt.add_hook(DummyHook(self))
opt.update()
def test_cleargrad_cpu(self):
self.check_cleargrad()
@attr.gpu
def test_cleargrad_gpu(self):
self.target.to_gpu()
self.check_cleargrad()
class DummyOptimizer(chainer.GradientMethod):
def __init__(self, test):
super(DummyOptimizer, self).__init__()
self.test = test
def create_update_rule(self):
return mock.MagicMock()
class DummyHook(object):
name = 'Dummy'
timing = 'pre'
def __init__(self, test):
self.test = test
def __call__(self, opt):
for param in opt.target.params():
# Confirm all grads are not None
self.test.assertIsNotNone(param.grad)
class CleargradHook(object):
name = 'Cleargrad'
timing = 'pre'
def __init__(self, _):
pass
def __call__(self, opt):
for param in opt.target.params():
# Clear all grads
param.cleargrad()
class TestGradientMethodClearGrads(unittest.TestCase):
def setUp(self):
self.optimizer = DummyOptimizer(self)
self.target = SimpleLink(
np.arange(3).astype(np.float32),
np.arange(3).astype(np.float32))
self.optimizer.setup(self.target)
self.optimizer.add_hook(DummyHook(self))
def test_update(self):
self.target.cleargrads()
self.optimizer.update()
class TestDeprecatedOptimizerHooksEmitsWarning(unittest.TestCase):
def setUp(self):
self.context = warnings.catch_warnings(record=True)
self.warnings = self.context.__enter__()
warnings.filterwarnings(action='always', category=DeprecationWarning)
def tearDown(self):
self.context.__exit__()
def test_gradient_clipping(self):
chainer.optimizer.GradientClipping(1.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
def test_gradient_hard_clipping(self):
chainer.optimizer.GradientHardClipping(1., 2.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
def test_gradient_noise(self):
chainer.optimizer.GradientNoise(1.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
def test_lasso(self):
chainer.optimizer.Lasso(1.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
def test_weight_decay(self):
chainer.optimizer.WeightDecay(1.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
testing.run_module(__name__, __file__)
| 32.244353 | 79 | 0.650513 | import copy
import unittest
import warnings
import mock
import numpy as np
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import optimizer
from chainer import optimizers
from chainer import testing
from chainer.testing import attr
class TestHyperparameter(unittest.TestCase):
def setUp(self):
self.parent = optimizer.Hyperparameter()
self.parent.x = 1
self.parent.y = 2
self.child = optimizer.Hyperparameter(self.parent)
self.child.y = 3
self.child.z = 4
def test_getattr(self):
self.assertTrue(hasattr(self.parent, 'x'))
self.assertEqual(self.parent.x, 1)
self.assertTrue(hasattr(self.parent, 'y'))
self.assertEqual(self.parent.y, 2)
self.assertFalse(hasattr(self.parent, 'z'))
self.assertTrue(hasattr(self.child, 'x'))
self.assertEqual(self.child.x, 1)
self.assertTrue(hasattr(self.child, 'y'))
self.assertEqual(self.child.y, 3)
self.assertTrue(hasattr(self.child, 'z'))
self.assertEqual(self.child.z, 4)
def test_get_dict(self):
self.assertEqual(self.parent.get_dict(), {'x': 1, 'y': 2})
self.assertEqual(self.child.get_dict(), {'x': 1, 'y': 3, 'z': 4})
def test_repr(self):
self.assertEqual(repr(self.parent), 'Hyperparameter(x=1, y=2)')
self.assertEqual(repr(self.child), 'Hyperparameter(x=1, y=3, z=4)')
def test_deep_copy(self):
parent_copy, child_copy = copy.deepcopy([self.parent, self.child])
self.assertEqual(self.child.get_dict(), child_copy.get_dict())
self.assertEqual(self.parent.get_dict(), parent_copy.get_dict())
self.assertIs(child_copy.parent, parent_copy)
class TestUpdateRule(unittest.TestCase):
def setUp(self):
self.data = np.ones((2, 3), np.float32)
self.grad = np.ones_like(self.data)
self.var = chainer.Variable(self.data, grad=self.grad)
self.update_rule = optimizer.UpdateRule()
self.update_rule.update_core_cpu = mock.MagicMock()
self.update_rule.update_core_gpu = mock.MagicMock()
def test_update_cpu(self):
self.update_rule.update(self.var)
self.assertEqual(self.update_rule.update_core_cpu.call_count, 1)
self.assertEqual(self.update_rule.update_core_gpu.call_count, 0)
@attr.gpu
def test_update_gpu(self):
self.var.to_gpu()
self.update_rule.update(self.var)
self.assertEqual(self.update_rule.update_core_cpu.call_count, 0)
self.assertEqual(self.update_rule.update_core_gpu.call_count, 1)
def check_add_hook(self, hook):
self.update_rule.update(self.var)
self.assertEqual(hook.call_count, 1)
args = hook.call_args_list[0][0]
self.assertEqual(len(args), 2)
self.assertIs(args[0], self.update_rule)
self.assertIs(args[1], self.var)
def test_add_hook(self):
hook = mock.MagicMock()
self.update_rule.add_hook(hook)
self.check_add_hook(hook)
def test_add_hook_with_name(self):
hook = mock.MagicMock()
self.update_rule.add_hook(hook, name='hook')
self.check_add_hook(hook)
def test_remove_hook(self):
hook = mock.MagicMock()
self.update_rule.add_hook(hook, name='hook')
self.update_rule.remove_hook('hook')
self.update_rule.update(self.var)
self.assertEqual(hook.call_count, 0)
def test_add_hook_with_function_name(self):
hook_body = mock.MagicMock()
def foo(update_rule, data, grad):
hook_body(update_rule, data, grad)
self.update_rule.add_hook(foo)
self.update_rule.remove_hook('foo')
self.update_rule.update(self.var)
self.assertEqual(hook_body.call_count, 0)
def test_add_hook_no_name(self):
class CallableWithoutName(object):
def __call__(self, update_rule, param):
pass
with self.assertRaises(ValueError):
self.update_rule.add_hook(CallableWithoutName())
def test_add_hook_duplicated_name(self):
self.update_rule.add_hook(mock.MagicMock(), name='foo')
with self.assertRaises(ValueError):
self.update_rule.add_hook(mock.MagicMock(), name='foo')
def test_remove_hook_not_exist(self):
with self.assertRaises(KeyError):
self.update_rule.remove_hook('foo')
def test_disabled_update_rule(self):
self.update_rule.update_core = mock.MagicMock()
self.update_rule.enabled = False
self.update_rule.update(self.var)
self.assertEqual(self.update_rule.update_core.call_count, 0)
self.update_rule.enabled = True
self.update_rule.update(self.var)
self.assertEqual(self.update_rule.update_core.call_count, 1)
def setup_state(self):
def init_state(data):
state = self.update_rule.state
state['a'] = 0
state['b'] = np.array([1, 2, 3], dtype=np.float32)
self.update_rule.init_state = init_state
@attr.gpu
def test_state_copy_to_gpu(self):
self.setup_state()
def update_core(param):
self.assertIsInstance(self.update_rule.state['a'], int)
self.assertIsInstance(self.update_rule.state['b'], cuda.ndarray)
self.update_rule.update_core = update_core
self.var.to_gpu()
self.update_rule.update(self.var)
@attr.multi_gpu(2)
def test_state_copy_to_another_gpu(self):
self.setup_state()
def update_core(param):
self.assertIsInstance(self.update_rule.state['b'], cuda.ndarray)
self.assertEqual(self.update_rule.state['b'].device.id, 1)
self.update_rule.update_core = lambda param: None
self.update_rule.update(chainer.Variable(
cuda.to_gpu(self.data, 0), grad=cuda.to_gpu(self.grad, 0)))
self.update_rule.update_core = update_core
self.update_rule.update(chainer.Variable(
cuda.to_gpu(self.data, 1), grad=cuda.to_gpu(self.grad, 1)))
@attr.gpu
def test_state_copy_to_cpu(self):
self.setup_state()
def update_core(param):
self.assertIsInstance(self.update_rule.state['a'], int)
self.assertIsInstance(self.update_rule.state['b'], np.ndarray)
self.var.to_gpu()
self.update_rule.update(self.var)
self.var.to_cpu()
self.update_rule.update_core = update_core
self.update_rule.update(self.var)
class TestOptimizer(unittest.TestCase):
def setUp(self):
self.optimizer = optimizer.Optimizer()
def test_new_epoch(self):
self.optimizer.new_epoch()
self.assertEqual(1, self.optimizer.epoch)
def test_invalid_new_epoch(self):
self.optimizer.use_auto_new_epoch = True
with self.assertRaises(RuntimeError):
self.optimizer.new_epoch()
def test_auto_new_epoch(self):
self.optimizer.use_auto_new_epoch = True
self.optimizer.new_epoch(auto=True)
self.assertEqual(1, self.optimizer.epoch)
def test_invalid_auto_new_epoch(self):
with self.assertRaises(RuntimeError):
self.optimizer.new_epoch(auto=True)
class TestOptimizerHook(unittest.TestCase):
def setUp(self):
self.optimizer = optimizer.Optimizer()
self.target = SimpleLink(
np.arange(6, dtype=np.float32).reshape(2, 3),
np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
def test_add_hook(self):
h1 = mock.MagicMock(timing='pre')
h1.call_for_each_param = False
self.optimizer.setup(self.target)
self.optimizer.add_hook(h1, 'h1')
self.optimizer.call_hooks()
h1.assert_called_with(self.optimizer)
def test_add_hook_call_for_each_param(self):
h1 = mock.MagicMock(timing='pre')
h1.call_for_each_param = True
self.optimizer.setup(self.target)
self.optimizer.add_hook(h1, 'h1')
self.optimizer.call_hooks()
h1.assert_called_with(self.target.param.update_rule, self.target.param)
def test_remove_hook(self):
h1 = mock.MagicMock(timing='pre')
self.optimizer.setup(self.target)
self.optimizer.add_hook(h1, 'h1')
self.optimizer.remove_hook('h1')
self.optimizer.call_hooks()
self.assertFalse(h1.called)
def test_duplicated_hook(self):
self.optimizer.setup(self.target)
self.optimizer.add_hook(lambda s: None, 'h1', timing='pre')
with self.assertRaises(KeyError):
self.optimizer.add_hook(lambda s: None, 'h1', timing='pre')
def test_invalid_hook(self):
with self.assertRaises(TypeError):
self.optimizer.add_hook(1)
def test_add_hook_before_setup(self):
with self.assertRaises(RuntimeError):
self.optimizer.add_hook(lambda s: None, 'h1')
class SimpleLink(chainer.Link):
def __init__(self, w, g):
super(SimpleLink, self).__init__()
with self.init_scope():
self.param = chainer.Parameter(w)
self.param.grad = g
class TestGradientMethod(unittest.TestCase):
def setUp(self):
self.optimizer = chainer.GradientMethod()
self.target = chainer.ChainList(
SimpleLink(np.arange(3).astype(np.float32),
np.arange(3).astype(np.float32)),
SimpleLink(np.arange(3).astype(np.float32),
np.arange(3).astype(np.float32)))
self.optimizer.create_update_rule = mock.MagicMock
def setup_cpu(self):
self.optimizer.setup(self.target)
def setup_gpu(self, device=None):
self.target.to_gpu(device)
self.optimizer.setup(self.target)
def test_setup(self):
create_update_rule = mock.MagicMock()
self.optimizer.create_update_rule = create_update_rule
self.optimizer.setup(self.target)
self.assertEqual(create_update_rule.call_count, 2)
self.assertEqual(create_update_rule.call_args_list[0], [(), {}])
self.assertEqual(create_update_rule.call_args_list[1], [(), {}])
def check_update(self):
self.assertEqual(self.optimizer.t, 0)
self.optimizer.update()
self.assertEqual(self.optimizer.t, 1)
self.target[0].param.update_rule.update.assert_called_once_with(
self.target[0].param)
self.target[1].param.update_rule.update.assert_called_once_with(
self.target[1].param)
def test_update_cpu(self):
self.setup_cpu()
self.check_update()
@attr.gpu
def test_update_gpu(self):
self.setup_gpu()
self.check_update()
@testing.parameterize(*testing.product({
'shape': [(4, 3, 2)],
'dtype': [np.float16, np.float32, np.float64],
'loss_scale': [None, 1, 10],
}))
class TestGradientMethodLossScale(unittest.TestCase):
def setUp(self):
param0_data = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
param0_grad = np.copy(param0_data)
param1_data = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
param1_grad = np.copy(param1_data)
self.target = chainer.ChainList(
SimpleLink(param0_data, param0_grad),
SimpleLink(param1_data, param1_grad))
lr = 1.0
if self.loss_scale is not None:
lr = self.loss_scale
for i in range(2):
self.target[i].param._loss_scale = self.loss_scale
self.optimizer = chainer.optimizers.SGD(lr)
def setup_cpu(self):
self.optimizer.setup(self.target)
def setup_gpu(self, device=None):
self.target.to_gpu(device)
self.optimizer.setup(self.target)
def check_update(self):
self.optimizer.update()
xp = backend.get_array_module(self.target[0].param)
expected_data = xp.zeros(self.shape, dtype=self.dtype)
rtol, atol = 1e-4, 1e-5
if self.dtype is np.float16:
rtol, atol = 1e-1, 1e-2
for i in range(2):
testing.assert_allclose(self.target[i].param.data, expected_data,
rtol=rtol, atol=atol)
def test_update_cpu(self):
self.setup_cpu()
self.check_update()
@attr.gpu
def test_update_gpu(self):
self.setup_gpu()
self.check_update()
class TestCleargradHook(unittest.TestCase):
def setUp(self):
self.target = SimpleLink(
np.arange(6, dtype=np.float32).reshape(2, 3),
np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
def check_cleargrad(self):
opt = optimizers.SGD(lr=1)
opt.setup(self.target)
opt.add_hook(CleargradHook(self))
opt.add_hook(DummyHook(self))
opt.update()
def test_cleargrad_cpu(self):
self.check_cleargrad()
@attr.gpu
def test_cleargrad_gpu(self):
self.target.to_gpu()
self.check_cleargrad()
class DummyOptimizer(chainer.GradientMethod):
def __init__(self, test):
super(DummyOptimizer, self).__init__()
self.test = test
def create_update_rule(self):
return mock.MagicMock()
class DummyHook(object):
name = 'Dummy'
timing = 'pre'
def __init__(self, test):
self.test = test
def __call__(self, opt):
for param in opt.target.params():
self.test.assertIsNotNone(param.grad)
class CleargradHook(object):
name = 'Cleargrad'
timing = 'pre'
def __init__(self, _):
pass
def __call__(self, opt):
for param in opt.target.params():
param.cleargrad()
class TestGradientMethodClearGrads(unittest.TestCase):
def setUp(self):
self.optimizer = DummyOptimizer(self)
self.target = SimpleLink(
np.arange(3).astype(np.float32),
np.arange(3).astype(np.float32))
self.optimizer.setup(self.target)
self.optimizer.add_hook(DummyHook(self))
def test_update(self):
self.target.cleargrads()
self.optimizer.update()
class TestDeprecatedOptimizerHooksEmitsWarning(unittest.TestCase):
def setUp(self):
self.context = warnings.catch_warnings(record=True)
self.warnings = self.context.__enter__()
warnings.filterwarnings(action='always', category=DeprecationWarning)
def tearDown(self):
self.context.__exit__()
def test_gradient_clipping(self):
chainer.optimizer.GradientClipping(1.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
def test_gradient_hard_clipping(self):
chainer.optimizer.GradientHardClipping(1., 2.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
def test_gradient_noise(self):
chainer.optimizer.GradientNoise(1.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
def test_lasso(self):
chainer.optimizer.Lasso(1.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
def test_weight_decay(self):
chainer.optimizer.WeightDecay(1.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
testing.run_module(__name__, __file__)
| true | true |
1c4a24cc773c14d0bd1e71136945beaa0f74f406 | 880 | py | Python | algorithms/sorting/bubble_sort.py | FatiahBalo/python-ds | 9eb88425822b6da4d7bd673a124c13fbe6f17523 | [
"MIT"
] | 1,723 | 2019-07-30T07:06:22.000Z | 2022-03-31T15:22:22.000Z | algorithms/sorting/bubble_sort.py | FatiahBalo/python-ds | 9eb88425822b6da4d7bd673a124c13fbe6f17523 | [
"MIT"
] | 213 | 2019-10-06T08:07:47.000Z | 2021-10-04T15:38:36.000Z | algorithms/sorting/bubble_sort.py | FatiahBalo/python-ds | 9eb88425822b6da4d7bd673a124c13fbe6f17523 | [
"MIT"
] | 628 | 2019-10-06T10:26:25.000Z | 2022-03-31T01:41:00.000Z | """
Bubble sort's worst-case time complexity is O(n^2), which occurs when the
array is reverse sorted. The best case, O(n), occurs when the array is already
sorted; only the optimized variant below, which stops early once a pass makes
no swaps, achieves this bound.
"""
def bubble_sort(array):
n = len(array)
for i in range(n):
for j in range(0, n-i-1):
if array[j] > array[j+1]:
array[j], array[j+1] = array[j+1], array[j]
return array
def bubble_sort_optimized(array):
"""
    Optimizes bubble sort by stopping early once a full pass performs no swaps
Reference - https://github.com/prabhupant/python-ds/pull/346
"""
has_swapped = True
num_of_iterations = 0
while has_swapped:
has_swapped = False
for i in range(len(array) - num_of_iterations - 1):
if array[i] > array[i + 1]:
array[i], array[i + 1] = array[i + 1], array[i]
has_swapped = True
num_of_iterations += 1
| 28.387097 | 78 | 0.590909 |
def bubble_sort(array):
n = len(array)
for i in range(n):
for j in range(0, n-i-1):
if array[j] > array[j+1]:
array[j], array[j+1] = array[j+1], array[j]
return array
def bubble_sort_optimized(array):
has_swapped = True
num_of_iterations = 0
while has_swapped:
has_swapped = False
for i in range(len(array) - num_of_iterations - 1):
if array[i] > array[i + 1]:
array[i], array[i + 1] = array[i + 1], array[i]
has_swapped = True
        num_of_iterations += 1
    return array
| true | true |
1c4a25bdb5c01fe4151a35fa9ee19f84f4205240 | 59,338 | py | Python | sdks/python/apache_beam/runners/common.py | rehmanmuradali/beam | de8ff705145cbbc41bea7750a0a5d3553924ab3a | [
"Apache-2.0"
] | 1 | 2022-01-24T22:07:52.000Z | 2022-01-24T22:07:52.000Z | sdks/python/apache_beam/runners/common.py | rehmanmuradali/beam | de8ff705145cbbc41bea7750a0a5d3553924ab3a | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/runners/common.py | rehmanmuradali/beam | de8ff705145cbbc41bea7750a0a5d3553924ab3a | [
"Apache-2.0"
] | 1 | 2019-05-21T11:30:31.000Z | 2019-05-21T11:30:31.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=True
"""Worker operations executor.
For internal use only; no backwards-compatibility guarantees.
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import threading
import traceback
from builtins import next
from builtins import object
from builtins import round
from builtins import zip
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Iterable
from typing import List
from typing import Mapping
from typing import Optional
from typing import Tuple
from future.utils import raise_with_traceback
from past.builtins import unicode
from apache_beam.coders import TupleCoder
from apache_beam.internal import util
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.pvalue import TaggedOutput
from apache_beam.runners.sdf_utils import NoOpWatermarkEstimatorProvider
from apache_beam.runners.sdf_utils import RestrictionTrackerView
from apache_beam.runners.sdf_utils import SplitResultPrimary
from apache_beam.runners.sdf_utils import SplitResultResidual
from apache_beam.runners.sdf_utils import ThreadsafeRestrictionTracker
from apache_beam.runners.sdf_utils import ThreadsafeWatermarkEstimator
from apache_beam.transforms import DoFn
from apache_beam.transforms import core
from apache_beam.transforms import userstate
from apache_beam.transforms.core import RestrictionProvider
from apache_beam.transforms.core import WatermarkEstimatorProvider
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import TimestampedValue
from apache_beam.transforms.window import WindowFn
from apache_beam.utils.counters import Counter
from apache_beam.utils.counters import CounterName
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
if TYPE_CHECKING:
from apache_beam.transforms import sideinputs
from apache_beam.transforms.core import TimerSpec
from apache_beam.io.iobase import RestrictionProgress
  from apache_beam.io.iobase import RestrictionTracker
  from apache_beam.io.iobase import WatermarkEstimator
class NameContext(object):
"""Holds the name information for a step."""
def __init__(self, step_name, transform_id=None):
# type: (str, Optional[str]) -> None
"""Creates a new step NameContext.
Args:
step_name: The name of the step.
"""
self.step_name = step_name
self.transform_id = transform_id
def __eq__(self, other):
return self.step_name == other.step_name
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __repr__(self):
return 'NameContext(%s)' % self.__dict__
def __hash__(self):
return hash(self.step_name)
def metrics_name(self):
"""Returns the step name used for metrics reporting."""
return self.step_name
def logging_name(self):
"""Returns the step name used for logging."""
return self.step_name
# TODO(BEAM-4028): Move DataflowNameContext to Dataflow internal code.
class DataflowNameContext(NameContext):
"""Holds the name information for a step in Dataflow.
This includes a step_name (e.g. s2), a user_name (e.g. Foo/Bar/ParDo(Fab)),
and a system_name (e.g. s2-shuffle-read34)."""
def __init__(self, step_name, user_name, system_name):
"""Creates a new step NameContext.
Args:
step_name: The internal name of the step (e.g. s2).
user_name: The full user-given name of the step (e.g. Foo/Bar/ParDo(Far)).
system_name: The step name in the optimized graph (e.g. s2-1).
"""
super(DataflowNameContext, self).__init__(step_name)
self.user_name = user_name
self.system_name = system_name
def __eq__(self, other):
return (
self.step_name == other.step_name and
self.user_name == other.user_name and
self.system_name == other.system_name)
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __hash__(self):
return hash((self.step_name, self.user_name, self.system_name))
def __repr__(self):
return 'DataflowNameContext(%s)' % self.__dict__
def logging_name(self):
"""Stackdriver logging relies on user-given step names (e.g. Foo/Bar)."""
return self.user_name
class Receiver(object):
"""For internal use only; no backwards-compatibility guarantees.
An object that consumes a WindowedValue.
This class can be efficiently used to pass values between the
  SDK and worker harnesses.
"""
def receive(self, windowed_value):
# type: (WindowedValue) -> None
raise NotImplementedError
class MethodWrapper(object):
"""For internal use only; no backwards-compatibility guarantees.
Represents a method that can be invoked by `DoFnInvoker`."""
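  # Illustrative sketch (``my_dofn`` is a hypothetical DoFn instance):
  # wrapping ``process`` exposes its argument names and default values:
  #
  #   wrapper = MethodWrapper(my_dofn, 'process')
  #   wrapper.args      # positional argument names of process()
  #   wrapper.defaults  # default values, including DoFn.*Param markers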
def __init__(self, obj_to_invoke, method_name):
"""
Initiates a ``MethodWrapper``.
Args:
obj_to_invoke: the object that contains the method. Has to either be a
`DoFn` object or a `RestrictionProvider` object.
method_name: name of the method as a string.
"""
if not isinstance(obj_to_invoke,
(DoFn, RestrictionProvider, WatermarkEstimatorProvider)):
      raise ValueError(
          '\'obj_to_invoke\' has to be either a \'DoFn\', a '
          '\'RestrictionProvider\', or a \'WatermarkEstimatorProvider\'. '
          'Received %r instead.' % obj_to_invoke)
self.args, self.defaults = core.get_function_arguments(obj_to_invoke,
method_name)
# TODO(BEAM-5878) support kwonlyargs on Python 3.
self.method_value = getattr(obj_to_invoke, method_name)
self.has_userstate_arguments = False
self.state_args_to_replace = {} # type: Dict[str, core.StateSpec]
self.timer_args_to_replace = {} # type: Dict[str, core.TimerSpec]
self.timestamp_arg_name = None # type: Optional[str]
self.window_arg_name = None # type: Optional[str]
self.key_arg_name = None # type: Optional[str]
self.restriction_provider = None
self.restriction_provider_arg_name = None
self.watermark_estimator_provider = None
self.watermark_estimator_provider_arg_name = None
if hasattr(self.method_value, 'unbounded_per_element'):
self.unbounded_per_element = True
else:
self.unbounded_per_element = False
for kw, v in zip(self.args[-len(self.defaults):], self.defaults):
if isinstance(v, core.DoFn.StateParam):
self.state_args_to_replace[kw] = v.state_spec
self.has_userstate_arguments = True
elif isinstance(v, core.DoFn.TimerParam):
self.timer_args_to_replace[kw] = v.timer_spec
self.has_userstate_arguments = True
elif core.DoFn.TimestampParam == v:
self.timestamp_arg_name = kw
elif core.DoFn.WindowParam == v:
self.window_arg_name = kw
elif core.DoFn.KeyParam == v:
self.key_arg_name = kw
elif isinstance(v, core.DoFn.RestrictionParam):
self.restriction_provider = v.restriction_provider
self.restriction_provider_arg_name = kw
elif isinstance(v, core.DoFn.WatermarkEstimatorParam):
self.watermark_estimator_provider = v.watermark_estimator_provider
self.watermark_estimator_provider_arg_name = kw
# Create NoOpWatermarkEstimatorProvider if there is no
# WatermarkEstimatorParam provided.
if self.watermark_estimator_provider is None:
self.watermark_estimator_provider = NoOpWatermarkEstimatorProvider()
def invoke_timer_callback(
self, user_state_context, key, window, timestamp, pane_info):
# TODO(ccy): support side inputs.
kwargs = {}
if self.has_userstate_arguments:
for kw, state_spec in self.state_args_to_replace.items():
kwargs[kw] = user_state_context.get_state(state_spec, key, window)
for kw, timer_spec in self.timer_args_to_replace.items():
kwargs[kw] = user_state_context.get_timer(
timer_spec, key, window, timestamp, pane_info)
if self.timestamp_arg_name:
kwargs[self.timestamp_arg_name] = Timestamp.of(timestamp)
if self.window_arg_name:
kwargs[self.window_arg_name] = window
if self.key_arg_name:
kwargs[self.key_arg_name] = key
if kwargs:
return self.method_value(**kwargs)
else:
return self.method_value()
class DoFnSignature(object):
"""Represents the signature of a given ``DoFn`` object.
Signature of a ``DoFn`` provides a view of the properties of a given ``DoFn``.
Among other things, this will give an extensible way for (1) accessing the
structure of the ``DoFn`` including methods and method parameters
(2) identifying features that a given ``DoFn`` support, for example, whether
a given ``DoFn`` is a Splittable ``DoFn`` (
https://s.apache.org/splittable-do-fn) (3) validating a ``DoFn`` based on the
feature set offered by it.
"""
def __init__(self, do_fn):
# type: (core.DoFn) -> None
# We add a property here for all methods defined by Beam DoFn features.
assert isinstance(do_fn, core.DoFn)
self.do_fn = do_fn
self.process_method = MethodWrapper(do_fn, 'process')
self.start_bundle_method = MethodWrapper(do_fn, 'start_bundle')
self.finish_bundle_method = MethodWrapper(do_fn, 'finish_bundle')
self.setup_lifecycle_method = MethodWrapper(do_fn, 'setup')
self.teardown_lifecycle_method = MethodWrapper(do_fn, 'teardown')
restriction_provider = self.get_restriction_provider()
watermark_estimator_provider = self.get_watermark_estimator_provider()
self.create_watermark_estimator_method = (
MethodWrapper(
watermark_estimator_provider, 'create_watermark_estimator'))
self.initial_restriction_method = (
MethodWrapper(restriction_provider, 'initial_restriction')
if restriction_provider else None)
self.create_tracker_method = (
MethodWrapper(restriction_provider, 'create_tracker')
if restriction_provider else None)
self.split_method = (
MethodWrapper(restriction_provider, 'split')
if restriction_provider else None)
self._validate()
# Handle stateful DoFns.
self._is_stateful_dofn = userstate.is_stateful_dofn(do_fn)
self.timer_methods = {} # type: Dict[TimerSpec, MethodWrapper]
if self._is_stateful_dofn:
# Populate timer firing methods, keyed by TimerSpec.
_, all_timer_specs = userstate.get_dofn_specs(do_fn)
for timer_spec in all_timer_specs:
method = timer_spec._attached_callback
self.timer_methods[timer_spec] = MethodWrapper(do_fn, method.__name__)
def get_restriction_provider(self):
# type: () -> RestrictionProvider
return self.process_method.restriction_provider
def get_watermark_estimator_provider(self):
# type: () -> WatermarkEstimatorProvider
return self.process_method.watermark_estimator_provider
def is_unbounded_per_element(self):
return self.process_method.unbounded_per_element
def _validate(self):
# type: () -> None
self._validate_process()
self._validate_bundle_method(self.start_bundle_method)
self._validate_bundle_method(self.finish_bundle_method)
self._validate_stateful_dofn()
def _validate_process(self):
# type: () -> None
"""Validate that none of the DoFnParameters are repeated in the function
"""
param_ids = [
d.param_id for d in self.process_method.defaults
if isinstance(d, core._DoFnParam)
]
if len(param_ids) != len(set(param_ids)):
raise ValueError(
'DoFn %r has duplicate process method parameters: %s.' %
(self.do_fn, param_ids))
def _validate_bundle_method(self, method_wrapper):
"""Validate that none of the DoFnParameters are used in the function
"""
for param in core.DoFn.DoFnProcessParams:
if param in method_wrapper.defaults:
raise ValueError(
'DoFn.process() method-only parameter %s cannot be used in %s.' %
(param, method_wrapper))
def _validate_stateful_dofn(self):
# type: () -> None
userstate.validate_stateful_dofn(self.do_fn)
def is_splittable_dofn(self):
# type: () -> bool
return self.get_restriction_provider() is not None
def get_restriction_coder(self):
# type: () -> Optional[TupleCoder]
"""Get coder for a restriction when processing an SDF. """
if self.is_splittable_dofn():
return TupleCoder([
(self.get_restriction_provider().restriction_coder()),
(self.get_watermark_estimator_provider().estimator_state_coder())
])
else:
return None
def is_stateful_dofn(self):
# type: () -> bool
return self._is_stateful_dofn
def has_timers(self):
# type: () -> bool
_, all_timer_specs = userstate.get_dofn_specs(self.do_fn)
return bool(all_timer_specs)
def has_bundle_finalization(self):
for sig in (self.start_bundle_method,
self.process_method,
self.finish_bundle_method):
for d in sig.defaults:
try:
if d == DoFn.BundleFinalizerParam:
return True
except Exception: # pylint: disable=broad-except
# Default value might be incomparable.
pass
return False
class DoFnInvoker(object):
"""An abstraction that can be used to execute DoFn methods.
A DoFnInvoker describes a particular way for invoking methods of a DoFn
represented by a given DoFnSignature."""
def __init__(self,
output_processor, # type: OutputProcessor
signature # type: DoFnSignature
):
# type: (...) -> None
"""
Initializes `DoFnInvoker`
:param output_processor: an OutputProcessor for receiving elements produced
by invoking functions of the DoFn.
:param signature: a DoFnSignature for the DoFn being invoked
"""
self.output_processor = output_processor
self.signature = signature
self.user_state_context = None # type: Optional[userstate.UserStateContext]
self.bundle_finalizer_param = None # type: Optional[core._BundleFinalizerParam]
@staticmethod
def create_invoker(
signature, # type: DoFnSignature
output_processor, # type: _OutputProcessor
context=None, # type: Optional[DoFnContext]
side_inputs=None, # type: Optional[List[sideinputs.SideInputMap]]
input_args=None, input_kwargs=None,
process_invocation=True,
user_state_context=None, # type: Optional[userstate.UserStateContext]
bundle_finalizer_param=None # type: Optional[core._BundleFinalizerParam]
):
# type: (...) -> DoFnInvoker
""" Creates a new DoFnInvoker based on given arguments.
Args:
output_processor: an OutputProcessor for receiving elements produced by
invoking functions of the DoFn.
signature: a DoFnSignature for the DoFn being invoked.
context: Context to be used when invoking the DoFn (deprecated).
      side_inputs: side inputs to be used when invoking the process method.
input_args: arguments to be used when invoking the process method. Some
of the arguments given here might be placeholders (for
example for side inputs) that get filled before invoking the
process method.
input_kwargs: keyword arguments to be used when invoking the process
method. Some of the keyword arguments given here might be
placeholders (for example for side inputs) that get filled
before invoking the process method.
process_invocation: If True, this function may return an invoker that
performs extra optimizations for invoking process()
method efficiently.
user_state_context: The UserStateContext instance for the current
Stateful DoFn.
bundle_finalizer_param: The param that passed to a process method, which
allows a callback to be registered.
"""
side_inputs = side_inputs or []
default_arg_values = signature.process_method.defaults
use_simple_invoker = not process_invocation or (
not side_inputs and not input_args and not input_kwargs and
not default_arg_values and not signature.is_stateful_dofn())
if use_simple_invoker:
return SimpleInvoker(output_processor, signature)
else:
if context is None:
raise TypeError("Must provide context when not using SimpleInvoker")
return PerWindowInvoker(
output_processor,
signature,
context,
side_inputs,
input_args,
input_kwargs,
user_state_context,
bundle_finalizer_param)
def invoke_process(self,
windowed_value, # type: WindowedValue
restriction=None,
watermark_estimator_state=None,
additional_args=None,
additional_kwargs=None
):
# type: (...) -> Iterable[SplitResultResidual]
"""Invokes the DoFn.process() function.
Args:
windowed_value: a WindowedValue object that gives the element for which
process() method should be invoked along with the window
the element belongs to.
restriction: The restriction to use when executing this splittable DoFn.
Should only be specified for splittable DoFns.
watermark_estimator_state: The watermark estimator state to use when
executing this splittable DoFn. Should only
be specified for splittable DoFns.
additional_args: additional arguments to be passed to the current
`DoFn.process()` invocation, usually as side inputs.
additional_kwargs: additional keyword arguments to be passed to the
current `DoFn.process()` invocation.
"""
raise NotImplementedError
def invoke_setup(self):
# type: () -> None
"""Invokes the DoFn.setup() method
"""
self.signature.setup_lifecycle_method.method_value()
def invoke_start_bundle(self):
# type: () -> None
"""Invokes the DoFn.start_bundle() method.
"""
self.output_processor.start_bundle_outputs(
self.signature.start_bundle_method.method_value())
def invoke_finish_bundle(self):
# type: () -> None
"""Invokes the DoFn.finish_bundle() method.
"""
self.output_processor.finish_bundle_outputs(
self.signature.finish_bundle_method.method_value())
def invoke_teardown(self):
# type: () -> None
"""Invokes the DoFn.teardown() method
"""
self.signature.teardown_lifecycle_method.method_value()
def invoke_user_timer(self, timer_spec, key, window, timestamp, pane_info):
# self.output_processor is Optional, but in practice it won't be None here
self.output_processor.process_outputs(
WindowedValue(None, timestamp, (window, )),
self.signature.timer_methods[timer_spec].invoke_timer_callback(
self.user_state_context, key, window, timestamp, pane_info))
def invoke_create_watermark_estimator(self, estimator_state):
return self.signature.create_watermark_estimator_method.method_value(
estimator_state)
def invoke_split(self, element, restriction):
return self.signature.split_method.method_value(element, restriction)
def invoke_initial_restriction(self, element):
return self.signature.initial_restriction_method.method_value(element)
def invoke_create_tracker(self, restriction):
return self.signature.create_tracker_method.method_value(restriction)
class SimpleInvoker(DoFnInvoker):
"""An invoker that processes elements ignoring windowing information."""
def __init__(self,
output_processor, # type: OutputProcessor
signature # type: DoFnSignature
):
# type: (...) -> None
super(SimpleInvoker, self).__init__(output_processor, signature)
self.process_method = signature.process_method.method_value
def invoke_process(self,
windowed_value, # type: WindowedValue
restriction=None,
watermark_estimator_state=None,
additional_args=None,
additional_kwargs=None
):
# type: (...) -> None
self.output_processor.process_outputs(
windowed_value, self.process_method(windowed_value.value))
class PerWindowInvoker(DoFnInvoker):
"""An invoker that processes elements considering windowing information."""
def __init__(self,
output_processor, # type: _OutputProcessor
signature, # type: DoFnSignature
context, # type: DoFnContext
side_inputs, # type: Iterable[sideinputs.SideInputMap]
input_args,
input_kwargs,
user_state_context, # type: Optional[userstate.UserStateContext]
bundle_finalizer_param # type: Optional[core._BundleFinalizerParam]
):
super(PerWindowInvoker, self).__init__(output_processor, signature)
self.side_inputs = side_inputs
self.context = context
self.process_method = signature.process_method.method_value
default_arg_values = signature.process_method.defaults
self.has_windowed_inputs = (
not all(si.is_globally_windowed() for si in side_inputs) or
(core.DoFn.WindowParam in default_arg_values) or
signature.is_stateful_dofn())
self.user_state_context = user_state_context
self.is_splittable = signature.is_splittable_dofn()
self.threadsafe_restriction_tracker = None # type: Optional[ThreadsafeRestrictionTracker]
self.threadsafe_watermark_estimator = None # type: Optional[ThreadsafeWatermarkEstimator]
self.current_windowed_value = None # type: Optional[WindowedValue]
self.bundle_finalizer_param = bundle_finalizer_param
self.is_key_param_required = False
if self.is_splittable:
self.splitting_lock = threading.Lock()
self.current_window_index = None
self.stop_window_index = None
    # Try to prepare all the arguments that can be filled in without any
    # additional work in the process function. Also cache all the placeholders
    # needed in the process function.
# Flag to cache additional arguments on the first element if all
# inputs are within the global window.
self.cache_globally_windowed_args = not self.has_windowed_inputs
input_args = input_args if input_args else []
input_kwargs = input_kwargs if input_kwargs else {}
arg_names = signature.process_method.args
# Create placeholder for element parameter of DoFn.process() method.
# Not to be confused with ArgumentPlaceHolder, which may be passed in
# input_args and is a placeholder for side-inputs.
class ArgPlaceholder(object):
def __init__(self, placeholder):
self.placeholder = placeholder
if core.DoFn.ElementParam not in default_arg_values:
# TODO(BEAM-7867): Handle cases in which len(arg_names) ==
# len(default_arg_values).
args_to_pick = len(arg_names) - len(default_arg_values) - 1
# Positional argument values for process(), with placeholders for special
# values such as the element, timestamp, etc.
args_with_placeholders = ([ArgPlaceholder(core.DoFn.ElementParam)] +
input_args[:args_to_pick])
else:
args_to_pick = len(arg_names) - len(default_arg_values)
args_with_placeholders = input_args[:args_to_pick]
    # Fill in the placeholders for the special parameters (context, key,
    # window or timestamp).
remaining_args_iter = iter(input_args[args_to_pick:])
for a, d in zip(arg_names[-len(default_arg_values):], default_arg_values):
if core.DoFn.ElementParam == d:
args_with_placeholders.append(ArgPlaceholder(d))
elif core.DoFn.KeyParam == d:
self.is_key_param_required = True
args_with_placeholders.append(ArgPlaceholder(d))
elif core.DoFn.WindowParam == d:
args_with_placeholders.append(ArgPlaceholder(d))
elif core.DoFn.TimestampParam == d:
args_with_placeholders.append(ArgPlaceholder(d))
elif core.DoFn.PaneInfoParam == d:
args_with_placeholders.append(ArgPlaceholder(d))
elif core.DoFn.SideInputParam == d:
# If no more args are present then the value must be passed via kwarg
try:
args_with_placeholders.append(next(remaining_args_iter))
except StopIteration:
if a not in input_kwargs:
raise ValueError("Value for sideinput %s not provided" % a)
elif isinstance(d, core.DoFn.StateParam):
args_with_placeholders.append(ArgPlaceholder(d))
elif isinstance(d, core.DoFn.TimerParam):
args_with_placeholders.append(ArgPlaceholder(d))
elif isinstance(d, type) and core.DoFn.BundleFinalizerParam == d:
args_with_placeholders.append(ArgPlaceholder(d))
else:
# If no more args are present then the value must be passed via kwarg
try:
args_with_placeholders.append(next(remaining_args_iter))
except StopIteration:
pass
args_with_placeholders.extend(list(remaining_args_iter))
# Stash the list of placeholder positions for performance
self.placeholders = [(i, x.placeholder)
for (i, x) in enumerate(args_with_placeholders)
if isinstance(x, ArgPlaceholder)]
self.args_for_process = args_with_placeholders
self.kwargs_for_process = input_kwargs
def invoke_process(self,
windowed_value, # type: WindowedValue
restriction=None,
watermark_estimator_state=None,
additional_args=None,
additional_kwargs=None
):
# type: (...) -> Iterable[SplitResultResidual]
if not additional_args:
additional_args = []
if not additional_kwargs:
additional_kwargs = {}
self.context.set_element(windowed_value)
    # Call the process function once per window if the DoFn has windowed side
    # inputs or if process() accesses the window parameter; otherwise we can
    # just call it once, since none of the arguments change between windows.
residuals = []
if self.is_splittable:
with self.splitting_lock:
self.current_windowed_value = windowed_value
self.restriction = restriction
self.watermark_estimator_state = watermark_estimator_state
try:
if self.has_windowed_inputs and len(windowed_value.windows) > 1:
for i, w in enumerate(windowed_value.windows):
if not self._should_process_window_for_sdf(
windowed_value, additional_kwargs, i):
break
residual = self._invoke_process_per_window(
WindowedValue(
windowed_value.value, windowed_value.timestamp, (w, )),
additional_args,
additional_kwargs)
if residual:
residuals.append(residual)
else:
if self._should_process_window_for_sdf(windowed_value,
additional_kwargs):
residual = self._invoke_process_per_window(
windowed_value, additional_args, additional_kwargs)
if residual:
residuals.append(residual)
finally:
with self.splitting_lock:
self.current_windowed_value = None
self.restriction = None
self.watermark_estimator_state = None
self.current_window_index = None
self.threadsafe_restriction_tracker = None
self.threadsafe_watermark_estimator = None
elif self.has_windowed_inputs and len(windowed_value.windows) != 1:
for w in windowed_value.windows:
self._invoke_process_per_window(
WindowedValue(
windowed_value.value, windowed_value.timestamp, (w, )),
additional_args,
additional_kwargs)
else:
self._invoke_process_per_window(
windowed_value, additional_args, additional_kwargs)
return residuals
def _should_process_window_for_sdf(
self,
windowed_value, # type: WindowedValue
additional_kwargs,
window_index=None, # type: Optional[int]
):
restriction_tracker = self.invoke_create_tracker(self.restriction)
watermark_estimator = self.invoke_create_watermark_estimator(
self.watermark_estimator_state)
with self.splitting_lock:
      if window_index is not None:
self.current_window_index = window_index
if window_index == 0:
self.stop_window_index = len(windowed_value.windows)
if window_index == self.stop_window_index:
return False
self.threadsafe_restriction_tracker = ThreadsafeRestrictionTracker(
restriction_tracker)
self.threadsafe_watermark_estimator = (
ThreadsafeWatermarkEstimator(watermark_estimator))
restriction_tracker_param = (
self.signature.process_method.restriction_provider_arg_name)
if not restriction_tracker_param:
raise ValueError(
'DoFn is splittable but DoFn does not have a '
'RestrictionTrackerParam defined')
additional_kwargs[restriction_tracker_param] = (
RestrictionTrackerView(self.threadsafe_restriction_tracker))
watermark_param = (
self.signature.process_method.watermark_estimator_provider_arg_name)
# When the watermark_estimator is a NoOpWatermarkEstimator, the system
# will not add watermark_param into the DoFn param list.
if watermark_param is not None:
additional_kwargs[watermark_param] = self.threadsafe_watermark_estimator
return True
def _invoke_process_per_window(self,
windowed_value, # type: WindowedValue
additional_args,
additional_kwargs,
):
# type: (...) -> Optional[SplitResultResidual]
if self.has_windowed_inputs:
window, = windowed_value.windows
side_inputs = [si[window] for si in self.side_inputs]
side_inputs.extend(additional_args)
args_for_process, kwargs_for_process = util.insert_values_in_args(
self.args_for_process, self.kwargs_for_process,
side_inputs)
elif self.cache_globally_windowed_args:
      # When processing the first element, attempt to cache the additional
      # args if all inputs are globally windowed.
self.cache_globally_windowed_args = False
# Fill in sideInputs if they are globally windowed
global_window = GlobalWindow()
self.args_for_process, self.kwargs_for_process = (
util.insert_values_in_args(
self.args_for_process, self.kwargs_for_process,
[si[global_window] for si in self.side_inputs]))
args_for_process, kwargs_for_process = (
self.args_for_process, self.kwargs_for_process)
else:
args_for_process, kwargs_for_process = (
self.args_for_process, self.kwargs_for_process)
    # Extract the key in the case of a stateful DoFn. Note that for a stateful
    # DoFn we set self.has_windowed_inputs to True during __init__, so windows
    # will be exploded coming into this method and we can rely on the window
    # variable being set above.
if self.user_state_context or self.is_key_param_required:
try:
key, unused_value = windowed_value.value
except (TypeError, ValueError):
raise ValueError((
'Input value to a stateful DoFn or KeyParam must be a KV tuple; '
'instead, got \'%s\'.') % (windowed_value.value, ))
for i, p in self.placeholders:
if core.DoFn.ElementParam == p:
args_for_process[i] = windowed_value.value
elif core.DoFn.KeyParam == p:
args_for_process[i] = key
elif core.DoFn.WindowParam == p:
args_for_process[i] = window
elif core.DoFn.TimestampParam == p:
args_for_process[i] = windowed_value.timestamp
elif core.DoFn.PaneInfoParam == p:
args_for_process[i] = windowed_value.pane_info
elif isinstance(p, core.DoFn.StateParam):
assert self.user_state_context is not None
args_for_process[i] = (
self.user_state_context.get_state(p.state_spec, key, window))
elif isinstance(p, core.DoFn.TimerParam):
assert self.user_state_context is not None
args_for_process[i] = (
self.user_state_context.get_timer(
p.timer_spec,
key,
window,
windowed_value.timestamp,
windowed_value.pane_info))
elif core.DoFn.BundleFinalizerParam == p:
args_for_process[i] = self.bundle_finalizer_param
if additional_kwargs:
if kwargs_for_process is None:
kwargs_for_process = additional_kwargs
else:
for key in additional_kwargs:
kwargs_for_process[key] = additional_kwargs[key]
if kwargs_for_process:
self.output_processor.process_outputs(
windowed_value,
self.process_method(*args_for_process, **kwargs_for_process),
self.threadsafe_watermark_estimator)
else:
self.output_processor.process_outputs(
windowed_value,
self.process_method(*args_for_process),
self.threadsafe_watermark_estimator)
if self.is_splittable:
assert self.threadsafe_restriction_tracker is not None
self.threadsafe_restriction_tracker.check_done()
deferred_status = self.threadsafe_restriction_tracker.deferred_status()
if deferred_status:
deferred_restriction, deferred_timestamp = deferred_status
element = windowed_value.value
size = self.signature.get_restriction_provider().restriction_size(
element, deferred_restriction)
current_watermark = (
self.threadsafe_watermark_estimator.current_watermark())
estimator_state = (
self.threadsafe_watermark_estimator.get_estimator_state())
residual_value = ((element, (deferred_restriction, estimator_state)),
size)
return SplitResultResidual(
residual_value=windowed_value.with_value(residual_value),
current_watermark=current_watermark,
deferred_timestamp=deferred_timestamp)
return None
@staticmethod
def _try_split(fraction,
window_index, # type: Optional[int]
stop_window_index, # type: Optional[int]
windowed_value, # type: WindowedValue
restriction,
watermark_estimator_state,
restriction_provider, # type: RestrictionProvider
restriction_tracker, # type: RestrictionTracker
watermark_estimator, # type: WatermarkEstimator
):
# type: (...) -> Optional[Tuple[Iterable[SplitResultPrimary], Iterable[SplitResultResidual], Optional[int]]]
"""Try to split returning a primaries, residuals and a new stop index.
For non-window observing splittable DoFns we split the current restriction
and assign the primary and residual to all the windows.
For window observing splittable DoFns, we:
1) return a split at a window boundary if the fraction lies outside of the
current window.
    2) attempt to split the current restriction; if successful, return
    the primary and residual for the current window and an additional
    primary and residual for any fully processed and fully unprocessed
    windows.
    3) fall back to returning a split at the window boundary if possible.
Args:
window_index: the current index of the window being processed or None
if the splittable DoFn is not window observing.
stop_window_index: the current index to stop processing at or None
if the splittable DoFn is not window observing.
windowed_value: the current windowed value
restriction: the initial restriction when processing was started.
watermark_estimator_state: the initial watermark estimator state when
processing was started.
restriction_provider: the DoFn's restriction provider
restriction_tracker: the current restriction tracker
watermark_estimator: the current watermark estimator
Returns:
A tuple containing (primaries, residuals, new_stop_index) or None if
splitting was not possible. new_stop_index will only be set if the
splittable DoFn is window observing otherwise it will be None.
"""
def compute_whole_window_split(to_index, from_index):
      # restriction_size expects the raw element rather than the
      # WindowedValue wrapper, matching its use later in _try_split.
      restriction_size = restriction_provider.restriction_size(
          windowed_value.value, restriction)
      # The primary and residual share the same value, differing only in
      # the set of windows they are in.
value = ((windowed_value.value, (restriction, watermark_estimator_state)),
restriction_size)
primary_restriction = SplitResultPrimary(
primary_value=WindowedValue(
value,
windowed_value.timestamp,
windowed_value.windows[:to_index])) if to_index > 0 else None
# Don't report any updated watermarks for the residual since they have
# not processed any part of the restriction.
residual_restriction = SplitResultResidual(
residual_value=WindowedValue(
value,
windowed_value.timestamp,
windowed_value.windows[from_index:stop_window_index]),
current_watermark=None,
deferred_timestamp=None) if from_index < stop_window_index else None
return (primary_restriction, residual_restriction)
primary_restrictions = []
residual_restrictions = []
window_observing = window_index is not None
# If we are processing each window separately and we aren't on the last
# window then compute whether the split lies within the current window
# or a future window.
if window_observing and window_index != stop_window_index - 1:
progress = restriction_tracker.current_progress()
if not progress:
# Assume no work has been completed for the current window if progress
# is unavailable.
from apache_beam.io.iobase import RestrictionProgress
progress = RestrictionProgress(completed=0, remaining=1)
scaled_progress = PerWindowInvoker._scale_progress(
progress, window_index, stop_window_index)
# Compute the fraction of the remainder relative to the scaled progress.
# If the value is greater than or equal to progress.remaining_work then we
# should split at the closest window boundary.
fraction_of_remainder = scaled_progress.remaining_work * fraction
if fraction_of_remainder >= progress.remaining_work:
# The fraction is outside of the current window and hence we will
# split at the closest window boundary. Favor a split and return the
# last window if we would have rounded up to the end of the window
# based upon the fraction.
new_stop_window_index = min(
stop_window_index - 1,
window_index + max(
1,
int(
round((
progress.completed_work +
scaled_progress.remaining_work * fraction) /
progress.total_work))))
primary, residual = compute_whole_window_split(
new_stop_window_index, new_stop_window_index)
assert primary is not None
assert residual is not None
return ([primary], [residual], new_stop_window_index)
else:
# The fraction is within the current window being processed so compute
# the updated fraction based upon the number of windows being processed.
new_stop_window_index = window_index + 1
fraction = fraction_of_remainder / progress.remaining_work
        # Attempt to split below; if we can't, we'll compute a split using
        # only window boundaries.
else:
# We aren't splitting within multiple windows so we don't change our
# stop index.
new_stop_window_index = stop_window_index
# Temporary workaround for [BEAM-7473]: get current_watermark before
# split, in case watermark gets advanced before getting split results.
# In worst case, current_watermark is always stale, which is ok.
current_watermark = (watermark_estimator.current_watermark())
current_estimator_state = (watermark_estimator.get_estimator_state())
split = restriction_tracker.try_split(fraction)
if split:
primary, residual = split
element = windowed_value.value
primary_size = restriction_provider.restriction_size(
windowed_value.value, primary)
residual_size = restriction_provider.restriction_size(
windowed_value.value, residual)
# We use the watermark estimator state for the original process call
# for the primary and the updated watermark estimator state for the
# residual for the split.
primary_split_value = ((element, (primary, watermark_estimator_state)),
primary_size)
residual_split_value = ((element, (residual, current_estimator_state)),
residual_size)
windows = (
windowed_value.windows[window_index],
) if window_observing else windowed_value.windows
primary_restrictions.append(
SplitResultPrimary(
primary_value=WindowedValue(
primary_split_value, windowed_value.timestamp, windows)))
residual_restrictions.append(
SplitResultResidual(
residual_value=WindowedValue(
residual_split_value, windowed_value.timestamp, windows),
current_watermark=current_watermark,
deferred_timestamp=None))
if window_observing:
assert new_stop_window_index == window_index + 1
primary, residual = compute_whole_window_split(
window_index, window_index + 1)
if primary:
primary_restrictions.append(primary)
if residual:
residual_restrictions.append(residual)
return (
primary_restrictions, residual_restrictions, new_stop_window_index)
elif new_stop_window_index and new_stop_window_index != stop_window_index:
# If we failed to split but have a new stop index then return a split
# at the window boundary.
primary, residual = compute_whole_window_split(
new_stop_window_index, new_stop_window_index)
assert primary is not None
assert residual is not None
return ([primary], [residual], new_stop_window_index)
else:
return None
def try_split(self, fraction):
# type: (...) -> Optional[Tuple[Iterable[SplitResultPrimary], Iterable[SplitResultResidual]]]
if not self.is_splittable:
return None
with self.splitting_lock:
if not self.threadsafe_restriction_tracker:
return None
      # Read all member variables that may change during processing while
      # holding the lock, so the split works from a consistent view of all
      # the references.
result = PerWindowInvoker._try_split(
fraction,
self.current_window_index,
self.stop_window_index,
self.current_windowed_value,
self.restriction,
self.watermark_estimator_state,
self.signature.get_restriction_provider(),
self.threadsafe_restriction_tracker,
self.threadsafe_watermark_estimator)
if not result:
return None
      # _try_split returns (primaries, residuals, new_stop_index); keep the
      # names consistent so the returned ordering is explicitly
      # (primaries, residuals).
      primaries, residuals, self.stop_window_index = result
      return (primaries, residuals)
@staticmethod
def _scale_progress(progress, window_index, stop_window_index):
    # Scale progress by assuming every window requires the same amount of
    # work as the current one, then spread that across all windows.
completed = window_index * progress.total_work + progress.completed_work
remaining = (
stop_window_index -
(window_index + 1)) * progress.total_work + progress.remaining_work
from apache_beam.io.iobase import RestrictionProgress
return RestrictionProgress(completed=completed, remaining=remaining)
def current_element_progress(self):
# type: () -> Optional[RestrictionProgress]
if not self.is_splittable:
return None
with self.splitting_lock:
current_window_index = self.current_window_index
stop_window_index = self.stop_window_index
threadsafe_restriction_tracker = self.threadsafe_restriction_tracker
if not threadsafe_restriction_tracker:
return None
progress = threadsafe_restriction_tracker.current_progress()
if not current_window_index or not progress:
return progress
    # stop_window_index should always be set if current_window_index is
    # set; anything else is an error.
assert stop_window_index
return PerWindowInvoker._scale_progress(
progress, current_window_index, stop_window_index)
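# --- Illustrative sketch, not part of the original module: a worked
# example of how _scale_progress folds per-window progress into a
# cross-window figure, which _try_split then compares against the
# requested fraction to decide between an in-window split and a split at
# a window boundary. The numbers are hypothetical; only the
# RestrictionProgress semantics used above are assumed.
def _example_scale_progress():
  from apache_beam.io.iobase import RestrictionProgress
  # Three windows in total (stop_window_index=3); we are on window
  # index 1, halfway through a restriction worth 10 units of work.
  progress = RestrictionProgress(completed=5, remaining=5)
  scaled = PerWindowInvoker._scale_progress(progress, 1, 3)
  # completed = 1 * 10 + 5 = 15 (all of window 0 plus half of window 1)
  # remaining = (3 - 2) * 10 + 5 = 15 (rest of window 1 plus window 2)
  assert scaled.completed_work == 15
  assert scaled.remaining_work == 15
  # Splitting at fraction 0.5 of the scaled remainder asks for 7.5 units,
  # more than the 5 units left in the current window, so _try_split would
  # place the split at a window boundary rather than inside this window.
  assert scaled.remaining_work * 0.5 >= progress.remaining_work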
class DoFnRunner:
"""For internal use only; no backwards-compatibility guarantees.
A helper class for executing ParDo operations.
"""
def __init__(self,
fn, # type: core.DoFn
args,
kwargs,
side_inputs, # type: Iterable[sideinputs.SideInputMap]
windowing,
tagged_receivers, # type: Mapping[Optional[str], Receiver]
step_name=None, # type: Optional[str]
logging_context=None,
state=None,
scoped_metrics_container=None,
operation_name=None,
user_state_context=None # type: Optional[userstate.UserStateContext]
):
"""Initializes a DoFnRunner.
Args:
fn: user DoFn to invoke
args: positional side input arguments (static and placeholder), if any
kwargs: keyword side input arguments (static and placeholder), if any
side_inputs: list of sideinput.SideInputMaps for deferred side inputs
windowing: windowing properties of the output PCollection(s)
tagged_receivers: a dict of tag name to Receiver objects
step_name: the name of this step
logging_context: DEPRECATED [BEAM-4728]
state: handle for accessing DoFn state
scoped_metrics_container: DEPRECATED
operation_name: The system name assigned by the runner for this operation.
user_state_context: The UserStateContext instance for the current
Stateful DoFn.
"""
# Need to support multiple iterations.
side_inputs = list(side_inputs)
self.step_name = step_name
self.context = DoFnContext(step_name, state=state)
self.bundle_finalizer_param = DoFn.BundleFinalizerParam()
do_fn_signature = DoFnSignature(fn)
# Optimize for the common case.
main_receivers = tagged_receivers[None]
# TODO(BEAM-3937): Remove if block after output counter released.
if 'outputs_per_element_counter' in RuntimeValueProvider.experiments:
      # TODO(BEAM-3955): Make step_name and operation_name less confusing.
output_counter_name = (
CounterName('per-element-output-count', step_name=operation_name))
per_element_output_counter = state._counter_factory.get_counter(
output_counter_name, Counter.DATAFLOW_DISTRIBUTION).accumulator
else:
per_element_output_counter = None
output_processor = _OutputProcessor(
windowing.windowfn,
main_receivers,
tagged_receivers,
per_element_output_counter)
if do_fn_signature.is_stateful_dofn() and not user_state_context:
raise Exception(
'Requested execution of a stateful DoFn, but no user state context '
'is available. This likely means that the current runner does not '
'support the execution of stateful DoFns.')
self.do_fn_invoker = DoFnInvoker.create_invoker(
do_fn_signature,
output_processor,
self.context,
side_inputs,
args,
kwargs,
user_state_context=user_state_context,
bundle_finalizer_param=self.bundle_finalizer_param)
def process(self, windowed_value):
# type: (WindowedValue) -> Iterable[SplitResultResidual]
try:
return self.do_fn_invoker.invoke_process(windowed_value)
except BaseException as exn:
self._reraise_augmented(exn)
return []
def process_with_sized_restriction(self, windowed_value):
# type: (WindowedValue) -> Iterable[SplitResultResidual]
(element, (restriction, estimator_state)), _ = windowed_value.value
return self.do_fn_invoker.invoke_process(
windowed_value.with_value(element),
restriction=restriction,
watermark_estimator_state=estimator_state)
def try_split(self, fraction):
# type: (...) -> Optional[Tuple[Iterable[SplitResultPrimary], Iterable[SplitResultResidual]]]
assert isinstance(self.do_fn_invoker, PerWindowInvoker)
return self.do_fn_invoker.try_split(fraction)
def current_element_progress(self):
# type: () -> Optional[RestrictionProgress]
assert isinstance(self.do_fn_invoker, PerWindowInvoker)
return self.do_fn_invoker.current_element_progress()
def process_user_timer(self, timer_spec, key, window, timestamp, pane_info):
try:
self.do_fn_invoker.invoke_user_timer(
timer_spec, key, window, timestamp, pane_info)
except BaseException as exn:
self._reraise_augmented(exn)
def _invoke_bundle_method(self, bundle_method):
try:
self.context.set_element(None)
bundle_method()
except BaseException as exn:
self._reraise_augmented(exn)
def _invoke_lifecycle_method(self, lifecycle_method):
try:
self.context.set_element(None)
lifecycle_method()
except BaseException as exn:
self._reraise_augmented(exn)
def setup(self):
# type: () -> None
self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
def start(self):
# type: () -> None
self._invoke_bundle_method(self.do_fn_invoker.invoke_start_bundle)
def finish(self):
# type: () -> None
self._invoke_bundle_method(self.do_fn_invoker.invoke_finish_bundle)
def teardown(self):
# type: () -> None
self._invoke_lifecycle_method(self.do_fn_invoker.invoke_teardown)
def finalize(self):
# type: () -> None
self.bundle_finalizer_param.finalize_bundle()
def _reraise_augmented(self, exn):
if getattr(exn, '_tagged_with_step', False) or not self.step_name:
raise
step_annotation = " [while running '%s']" % self.step_name
# To emulate exception chaining (not available in Python 2).
try:
# Attempt to construct the same kind of exception
# with an augmented message.
new_exn = type(exn)(exn.args[0] + step_annotation, *exn.args[1:])
new_exn._tagged_with_step = True # Could raise attribute error.
except: # pylint: disable=bare-except
# If anything goes wrong, construct a RuntimeError whose message
# records the original exception's type and message.
new_exn = RuntimeError(
traceback.format_exception_only(type(exn), exn)[-1].strip() +
step_annotation)
new_exn._tagged_with_step = True
raise_with_traceback(new_exn)
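# --- Illustrative sketch, not part of the original module: the
# message-augmentation pattern used by DoFnRunner._reraise_augmented,
# shown standalone. _annotate_exception is a hypothetical helper, not
# Beam API; it rebuilds the exception with the step name appended,
# falling back to RuntimeError when the original exception type cannot
# be reconstructed from its args.
def _annotate_exception(exn, step_name):
  step_annotation = " [while running '%s']" % step_name
  try:
    # Attempt to construct the same kind of exception with the message
    # augmented by the step name.
    new_exn = type(exn)(exn.args[0] + step_annotation, *exn.args[1:])
  except Exception:
    # Fall back to a RuntimeError that records the original type/message.
    new_exn = RuntimeError(
        traceback.format_exception_only(type(exn), exn)[-1].strip() +
        step_annotation)
  return new_exn
# For example, _annotate_exception(ValueError('boom'), 'Map(fn)') yields
# ValueError("boom [while running 'Map(fn)']").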
class OutputProcessor(object):
def process_outputs(
self, windowed_input_element, results, watermark_estimator=None):
# type: (WindowedValue, Iterable[Any], Optional[WatermarkEstimator]) -> None
raise NotImplementedError
class _OutputProcessor(OutputProcessor):
"""Processes output produced by DoFn method invocations."""
def __init__(self,
window_fn,
main_receivers, # type: Receiver
tagged_receivers, # type: Mapping[Optional[str], Receiver]
per_element_output_counter):
"""Initializes ``_OutputProcessor``.
Args:
window_fn: a windowing function (WindowFn).
      main_receivers: the main Receiver object.
      tagged_receivers: a dict of tag name to Receiver objects.
      per_element_output_counter: per-element output counter of one
        work_item. May be None if the experimental flag is turned off.
"""
self.window_fn = window_fn
self.main_receivers = main_receivers
self.tagged_receivers = tagged_receivers
self.per_element_output_counter = per_element_output_counter
def process_outputs(
self, windowed_input_element, results, watermark_estimator=None):
# type: (WindowedValue, Iterable[Any], Optional[WatermarkEstimator]) -> None
"""Dispatch the result of process computation to the appropriate receivers.
A value wrapped in a TaggedOutput object will be unwrapped and
then dispatched to the appropriate indexed output.
"""
if results is None:
# TODO(BEAM-3937): Remove if block after output counter released.
      # Only enable per_element_output_counter when the counter is cythonized.
if (self.per_element_output_counter is not None and
self.per_element_output_counter.is_cythonized):
self.per_element_output_counter.add_input(0)
return
output_element_count = 0
for result in results:
      # results may be a generator, so we cannot call len() on it; count
      # elements as we iterate instead.
output_element_count += 1
tag = None
if isinstance(result, TaggedOutput):
tag = result.tag
if not isinstance(tag, (str, unicode)):
raise TypeError('In %s, tag %s is not a string' % (self, tag))
result = result.value
if isinstance(result, WindowedValue):
windowed_value = result
if (windowed_input_element is not None and
len(windowed_input_element.windows) != 1):
windowed_value.windows *= len(windowed_input_element.windows)
elif isinstance(result, TimestampedValue):
assign_context = WindowFn.AssignContext(result.timestamp, result.value)
windowed_value = WindowedValue(
result.value,
result.timestamp,
self.window_fn.assign(assign_context))
if len(windowed_input_element.windows) != 1:
windowed_value.windows *= len(windowed_input_element.windows)
else:
windowed_value = windowed_input_element.with_value(result)
if watermark_estimator is not None:
watermark_estimator.observe_timestamp(windowed_value.timestamp)
if tag is None:
self.main_receivers.receive(windowed_value)
else:
self.tagged_receivers[tag].receive(windowed_value)
    # TODO(BEAM-3937): Remove if block after output counter released.
    # Only enable per_element_output_counter when the counter is cythonized.
if (self.per_element_output_counter is not None and
self.per_element_output_counter.is_cythonized):
self.per_element_output_counter.add_input(output_element_count)
def start_bundle_outputs(self, results):
"""Validate that start_bundle does not output any elements"""
if results is None:
return
raise RuntimeError(
'Start Bundle should not output any elements but got %s' % results)
def finish_bundle_outputs(self, results):
"""Dispatch the result of finish_bundle to the appropriate receivers.
A value wrapped in a TaggedOutput object will be unwrapped and
then dispatched to the appropriate indexed output.
"""
if results is None:
return
for result in results:
tag = None
if isinstance(result, TaggedOutput):
tag = result.tag
if not isinstance(tag, (str, unicode)):
raise TypeError('In %s, tag %s is not a string' % (self, tag))
result = result.value
if isinstance(result, WindowedValue):
windowed_value = result
else:
        raise RuntimeError(
            'Finish Bundle should only output WindowedValue type '
            'but got %s' % type(result))
if tag is None:
self.main_receivers.receive(windowed_value)
else:
self.tagged_receivers[tag].receive(windowed_value)
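# --- Illustrative sketch, not part of the original module: how
# _OutputProcessor routes plain and tagged results. _RecordingReceiver
# is a hypothetical stand-in that just records what it receives;
# window_fn is passed as None because it is only consulted for
# TimestampedValue results, which this example does not produce.
class _RecordingReceiver(Receiver):
  def __init__(self):
    self.received = []
  def receive(self, windowed_value):
    self.received.append(windowed_value)
def _example_tagged_dispatch():
  main, errors = _RecordingReceiver(), _RecordingReceiver()
  processor = _OutputProcessor(
      window_fn=None,
      main_receivers=main,
      tagged_receivers={'errors': errors},
      per_element_output_counter=None)
  element = WindowedValue('input', 0, (GlobalWindow(), ))
  # Plain results go to the main receiver; TaggedOutput values are
  # unwrapped and routed to the receiver registered under their tag.
  processor.process_outputs(
      element, ['ok', TaggedOutput('errors', 'bad')])
  assert [wv.value for wv in main.received] == ['ok']
  assert [wv.value for wv in errors.received] == ['bad']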
class _NoContext(WindowFn.AssignContext):
"""An uninspectable WindowFn.AssignContext."""
NO_VALUE = object()
def __init__(self, value, timestamp=NO_VALUE):
self.value = value
self._timestamp = timestamp
@property
def timestamp(self):
if self._timestamp is self.NO_VALUE:
raise ValueError('No timestamp in this context.')
else:
return self._timestamp
@property
def existing_windows(self):
raise ValueError('No existing_windows in this context.')
class DoFnState(object):
"""For internal use only; no backwards-compatibility guarantees.
Keeps track of state that DoFns want, currently, user counters.
"""
def __init__(self, counter_factory):
self.step_name = ''
self._counter_factory = counter_factory
def counter_for(self, aggregator):
"""Looks up the counter for this aggregator, creating one if necessary."""
return self._counter_factory.get_aggregator_counter(
self.step_name, aggregator)
# TODO(robertwb): Replace core.DoFnContext with this.
class DoFnContext(object):
"""For internal use only; no backwards-compatibility guarantees."""
def __init__(self, label, element=None, state=None):
self.label = label
self.state = state
if element is not None:
self.set_element(element)
def set_element(self, windowed_value):
# type: (Optional[WindowedValue]) -> None
self.windowed_value = windowed_value
@property
def element(self):
if self.windowed_value is None:
raise AttributeError('element not accessible in this context')
else:
return self.windowed_value.value
@property
def timestamp(self):
if self.windowed_value is None:
raise AttributeError('timestamp not accessible in this context')
else:
return self.windowed_value.timestamp
@property
def windows(self):
if self.windowed_value is None:
raise AttributeError('windows not accessible in this context')
else:
return self.windowed_value.windows
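# --- Illustrative sketch, not part of the original module: DoFnContext
# only exposes element/timestamp/windows while an element is set; once
# cleared, access raises AttributeError.
def _example_do_fn_context():
  ctx = DoFnContext('my-step')
  ctx.set_element(WindowedValue(42, 0, (GlobalWindow(), )))
  assert ctx.element == 42
  assert list(ctx.windows) == [GlobalWindow()]
  ctx.set_element(None)
  try:
    _ = ctx.element
  except AttributeError:
    pass  # element is not accessible once cleared, as expected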
| 39.985175 | 112 | 0.695473 |
from __future__ import absolute_import
from __future__ import division
import threading
import traceback
from builtins import next
from builtins import object
from builtins import round
from builtins import zip
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Iterable
from typing import List
from typing import Mapping
from typing import Optional
from typing import Tuple
from future.utils import raise_with_traceback
from past.builtins import unicode
from apache_beam.coders import TupleCoder
from apache_beam.internal import util
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.pvalue import TaggedOutput
from apache_beam.runners.sdf_utils import NoOpWatermarkEstimatorProvider
from apache_beam.runners.sdf_utils import RestrictionTrackerView
from apache_beam.runners.sdf_utils import SplitResultPrimary
from apache_beam.runners.sdf_utils import SplitResultResidual
from apache_beam.runners.sdf_utils import ThreadsafeRestrictionTracker
from apache_beam.runners.sdf_utils import ThreadsafeWatermarkEstimator
from apache_beam.transforms import DoFn
from apache_beam.transforms import core
from apache_beam.transforms import userstate
from apache_beam.transforms.core import RestrictionProvider
from apache_beam.transforms.core import WatermarkEstimatorProvider
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import TimestampedValue
from apache_beam.transforms.window import WindowFn
from apache_beam.utils.counters import Counter
from apache_beam.utils.counters import CounterName
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
if TYPE_CHECKING:
from apache_beam.transforms import sideinputs
from apache_beam.transforms.core import TimerSpec
from apache_beam.io.iobase import RestrictionProgress
from apache_beam.iobase import RestrictionTracker
from apache_beam.iobase import WatermarkEstimator
class NameContext(object):
def __init__(self, step_name, transform_id=None):
self.step_name = step_name
self.transform_id = transform_id
def __eq__(self, other):
return self.step_name == other.step_name
def __ne__(self, other):
return not self == other
def __repr__(self):
return 'NameContext(%s)' % self.__dict__
def __hash__(self):
return hash(self.step_name)
def metrics_name(self):
return self.step_name
def logging_name(self):
return self.step_name
class DataflowNameContext(NameContext):
def __init__(self, step_name, user_name, system_name):
super(DataflowNameContext, self).__init__(step_name)
self.user_name = user_name
self.system_name = system_name
def __eq__(self, other):
return (
self.step_name == other.step_name and
self.user_name == other.user_name and
self.system_name == other.system_name)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.step_name, self.user_name, self.system_name))
def __repr__(self):
return 'DataflowNameContext(%s)' % self.__dict__
def logging_name(self):
return self.user_name
class Receiver(object):
def receive(self, windowed_value):
raise NotImplementedError
class MethodWrapper(object):
def __init__(self, obj_to_invoke, method_name):
if not isinstance(obj_to_invoke,
(DoFn, RestrictionProvider, WatermarkEstimatorProvider)):
raise ValueError(
'\'obj_to_invoke\' has to be either a \'DoFn\' or '
'a \'RestrictionProvider\'. Received %r instead.' % obj_to_invoke)
self.args, self.defaults = core.get_function_arguments(obj_to_invoke,
method_name)
self.method_value = getattr(obj_to_invoke, method_name)
self.has_userstate_arguments = False
self.state_args_to_replace = {} self.timer_args_to_replace = {} self.timestamp_arg_name = None self.window_arg_name = None self.key_arg_name = None self.restriction_provider = None
self.restriction_provider_arg_name = None
self.watermark_estimator_provider = None
self.watermark_estimator_provider_arg_name = None
if hasattr(self.method_value, 'unbounded_per_element'):
self.unbounded_per_element = True
else:
self.unbounded_per_element = False
for kw, v in zip(self.args[-len(self.defaults):], self.defaults):
if isinstance(v, core.DoFn.StateParam):
self.state_args_to_replace[kw] = v.state_spec
self.has_userstate_arguments = True
elif isinstance(v, core.DoFn.TimerParam):
self.timer_args_to_replace[kw] = v.timer_spec
self.has_userstate_arguments = True
elif core.DoFn.TimestampParam == v:
self.timestamp_arg_name = kw
elif core.DoFn.WindowParam == v:
self.window_arg_name = kw
elif core.DoFn.KeyParam == v:
self.key_arg_name = kw
elif isinstance(v, core.DoFn.RestrictionParam):
self.restriction_provider = v.restriction_provider
self.restriction_provider_arg_name = kw
elif isinstance(v, core.DoFn.WatermarkEstimatorParam):
self.watermark_estimator_provider = v.watermark_estimator_provider
self.watermark_estimator_provider_arg_name = kw
if self.watermark_estimator_provider is None:
self.watermark_estimator_provider = NoOpWatermarkEstimatorProvider()
def invoke_timer_callback(
self, user_state_context, key, window, timestamp, pane_info):
kwargs = {}
if self.has_userstate_arguments:
for kw, state_spec in self.state_args_to_replace.items():
kwargs[kw] = user_state_context.get_state(state_spec, key, window)
for kw, timer_spec in self.timer_args_to_replace.items():
kwargs[kw] = user_state_context.get_timer(
timer_spec, key, window, timestamp, pane_info)
if self.timestamp_arg_name:
kwargs[self.timestamp_arg_name] = Timestamp.of(timestamp)
if self.window_arg_name:
kwargs[self.window_arg_name] = window
if self.key_arg_name:
kwargs[self.key_arg_name] = key
if kwargs:
return self.method_value(**kwargs)
else:
return self.method_value()
class DoFnSignature(object):
def __init__(self, do_fn):
assert isinstance(do_fn, core.DoFn)
self.do_fn = do_fn
self.process_method = MethodWrapper(do_fn, 'process')
self.start_bundle_method = MethodWrapper(do_fn, 'start_bundle')
self.finish_bundle_method = MethodWrapper(do_fn, 'finish_bundle')
self.setup_lifecycle_method = MethodWrapper(do_fn, 'setup')
self.teardown_lifecycle_method = MethodWrapper(do_fn, 'teardown')
restriction_provider = self.get_restriction_provider()
watermark_estimator_provider = self.get_watermark_estimator_provider()
self.create_watermark_estimator_method = (
MethodWrapper(
watermark_estimator_provider, 'create_watermark_estimator'))
self.initial_restriction_method = (
MethodWrapper(restriction_provider, 'initial_restriction')
if restriction_provider else None)
self.create_tracker_method = (
MethodWrapper(restriction_provider, 'create_tracker')
if restriction_provider else None)
self.split_method = (
MethodWrapper(restriction_provider, 'split')
if restriction_provider else None)
self._validate()
self._is_stateful_dofn = userstate.is_stateful_dofn(do_fn)
self.timer_methods = {} if self._is_stateful_dofn:
_, all_timer_specs = userstate.get_dofn_specs(do_fn)
for timer_spec in all_timer_specs:
method = timer_spec._attached_callback
self.timer_methods[timer_spec] = MethodWrapper(do_fn, method.__name__)
def get_restriction_provider(self):
return self.process_method.restriction_provider
def get_watermark_estimator_provider(self):
return self.process_method.watermark_estimator_provider
def is_unbounded_per_element(self):
return self.process_method.unbounded_per_element
def _validate(self):
self._validate_process()
self._validate_bundle_method(self.start_bundle_method)
self._validate_bundle_method(self.finish_bundle_method)
self._validate_stateful_dofn()
def _validate_process(self):
param_ids = [
d.param_id for d in self.process_method.defaults
if isinstance(d, core._DoFnParam)
]
if len(param_ids) != len(set(param_ids)):
raise ValueError(
'DoFn %r has duplicate process method parameters: %s.' %
(self.do_fn, param_ids))
def _validate_bundle_method(self, method_wrapper):
for param in core.DoFn.DoFnProcessParams:
if param in method_wrapper.defaults:
raise ValueError(
'DoFn.process() method-only parameter %s cannot be used in %s.' %
(param, method_wrapper))
def _validate_stateful_dofn(self):
userstate.validate_stateful_dofn(self.do_fn)
def is_splittable_dofn(self):
return self.get_restriction_provider() is not None
def get_restriction_coder(self):
if self.is_splittable_dofn():
return TupleCoder([
(self.get_restriction_provider().restriction_coder()),
(self.get_watermark_estimator_provider().estimator_state_coder())
])
else:
return None
def is_stateful_dofn(self):
return self._is_stateful_dofn
def has_timers(self):
_, all_timer_specs = userstate.get_dofn_specs(self.do_fn)
return bool(all_timer_specs)
def has_bundle_finalization(self):
for sig in (self.start_bundle_method,
self.process_method,
self.finish_bundle_method):
for d in sig.defaults:
try:
if d == DoFn.BundleFinalizerParam:
return True
except Exception: pass
return False
class DoFnInvoker(object):
def __init__(self,
output_processor, signature ):
self.output_processor = output_processor
self.signature = signature
self.user_state_context = None self.bundle_finalizer_param = None
@staticmethod
def create_invoker(
signature, output_processor, context=None, side_inputs=None, input_args=None, input_kwargs=None,
process_invocation=True,
user_state_context=None, bundle_finalizer_param=None ):
side_inputs = side_inputs or []
default_arg_values = signature.process_method.defaults
use_simple_invoker = not process_invocation or (
not side_inputs and not input_args and not input_kwargs and
not default_arg_values and not signature.is_stateful_dofn())
if use_simple_invoker:
return SimpleInvoker(output_processor, signature)
else:
if context is None:
raise TypeError("Must provide context when not using SimpleInvoker")
return PerWindowInvoker(
output_processor,
signature,
context,
side_inputs,
input_args,
input_kwargs,
user_state_context,
bundle_finalizer_param)
def invoke_process(self,
windowed_value, restriction=None,
watermark_estimator_state=None,
additional_args=None,
additional_kwargs=None
):
raise NotImplementedError
def invoke_setup(self):
self.signature.setup_lifecycle_method.method_value()
def invoke_start_bundle(self):
self.output_processor.start_bundle_outputs(
self.signature.start_bundle_method.method_value())
def invoke_finish_bundle(self):
self.output_processor.finish_bundle_outputs(
self.signature.finish_bundle_method.method_value())
def invoke_teardown(self):
self.signature.teardown_lifecycle_method.method_value()
def invoke_user_timer(self, timer_spec, key, window, timestamp, pane_info):
self.output_processor.process_outputs(
WindowedValue(None, timestamp, (window, )),
self.signature.timer_methods[timer_spec].invoke_timer_callback(
self.user_state_context, key, window, timestamp, pane_info))
def invoke_create_watermark_estimator(self, estimator_state):
return self.signature.create_watermark_estimator_method.method_value(
estimator_state)
def invoke_split(self, element, restriction):
return self.signature.split_method.method_value(element, restriction)
def invoke_initial_restriction(self, element):
return self.signature.initial_restriction_method.method_value(element)
def invoke_create_tracker(self, restriction):
return self.signature.create_tracker_method.method_value(restriction)
class SimpleInvoker(DoFnInvoker):
def __init__(self,
output_processor, # type: OutputProcessor
signature # type: DoFnSignature
):
# type: (...) -> None
super(SimpleInvoker, self).__init__(output_processor, signature)
self.process_method = signature.process_method.method_value
def invoke_process(self,
windowed_value, # type: WindowedValue
restriction=None,
watermark_estimator_state=None,
additional_args=None,
additional_kwargs=None
):
# type: (...) -> None
self.output_processor.process_outputs(
windowed_value, self.process_method(windowed_value.value))
class PerWindowInvoker(DoFnInvoker):
def __init__(self,
output_processor, # type: _OutputProcessor
signature, # type: DoFnSignature
context, # type: DoFnContext
side_inputs, # type: Iterable[sideinputs.SideInputMap]
input_args,
input_kwargs,
user_state_context, # type: Optional[userstate.UserStateContext]
bundle_finalizer_param # type: Optional[core._BundleFinalizerParam]
):
super(PerWindowInvoker, self).__init__(output_processor, signature)
self.side_inputs = side_inputs
self.context = context
self.process_method = signature.process_method.method_value
default_arg_values = signature.process_method.defaults
self.has_windowed_inputs = (
not all(si.is_globally_windowed() for si in side_inputs) or
(core.DoFn.WindowParam in default_arg_values) or
signature.is_stateful_dofn())
self.user_state_context = user_state_context
self.is_splittable = signature.is_splittable_dofn()
self.threadsafe_restriction_tracker = None # type: Optional[ThreadsafeRestrictionTracker]
self.threadsafe_watermark_estimator = None # type: Optional[ThreadsafeWatermarkEstimator]
self.current_windowed_value = None # type: Optional[WindowedValue]
self.bundle_finalizer_param = bundle_finalizer_param
self.is_key_param_required = False
if self.is_splittable:
self.splitting_lock = threading.Lock()
self.current_window_index = None
self.stop_window_index = None
# Try to prepare all the arguments that can just be filled in
# without any additional work. in the process function.
# Also cache all the placeholders needed in the process function.
# Flag to cache additional arguments on the first element if all
# inputs are within the global window.
self.cache_globally_windowed_args = not self.has_windowed_inputs
input_args = input_args if input_args else []
input_kwargs = input_kwargs if input_kwargs else {}
arg_names = signature.process_method.args
# Create placeholder for element parameter of DoFn.process() method.
# Not to be confused with ArgumentPlaceHolder, which may be passed in
# input_args and is a placeholder for side-inputs.
class ArgPlaceholder(object):
def __init__(self, placeholder):
self.placeholder = placeholder
if core.DoFn.ElementParam not in default_arg_values:
# TODO(BEAM-7867): Handle cases in which len(arg_names) ==
# len(default_arg_values).
args_to_pick = len(arg_names) - len(default_arg_values) - 1
# Positional argument values for process(), with placeholders for special
# values such as the element, timestamp, etc.
args_with_placeholders = ([ArgPlaceholder(core.DoFn.ElementParam)] +
input_args[:args_to_pick])
else:
args_to_pick = len(arg_names) - len(default_arg_values)
args_with_placeholders = input_args[:args_to_pick]
# Fill the OtherPlaceholders for context, key, window or timestamp
remaining_args_iter = iter(input_args[args_to_pick:])
for a, d in zip(arg_names[-len(default_arg_values):], default_arg_values):
if core.DoFn.ElementParam == d:
args_with_placeholders.append(ArgPlaceholder(d))
elif core.DoFn.KeyParam == d:
self.is_key_param_required = True
args_with_placeholders.append(ArgPlaceholder(d))
elif core.DoFn.WindowParam == d:
args_with_placeholders.append(ArgPlaceholder(d))
elif core.DoFn.TimestampParam == d:
args_with_placeholders.append(ArgPlaceholder(d))
elif core.DoFn.PaneInfoParam == d:
args_with_placeholders.append(ArgPlaceholder(d))
elif core.DoFn.SideInputParam == d:
# If no more args are present then the value must be passed via kwarg
try:
args_with_placeholders.append(next(remaining_args_iter))
except StopIteration:
if a not in input_kwargs:
raise ValueError("Value for sideinput %s not provided" % a)
elif isinstance(d, core.DoFn.StateParam):
args_with_placeholders.append(ArgPlaceholder(d))
elif isinstance(d, core.DoFn.TimerParam):
args_with_placeholders.append(ArgPlaceholder(d))
elif isinstance(d, type) and core.DoFn.BundleFinalizerParam == d:
args_with_placeholders.append(ArgPlaceholder(d))
else:
# If no more args are present then the value must be passed via kwarg
try:
args_with_placeholders.append(next(remaining_args_iter))
except StopIteration:
pass
args_with_placeholders.extend(list(remaining_args_iter))
# Stash the list of placeholder positions for performance
self.placeholders = [(i, x.placeholder)
for (i, x) in enumerate(args_with_placeholders)
if isinstance(x, ArgPlaceholder)]
self.args_for_process = args_with_placeholders
self.kwargs_for_process = input_kwargs
def invoke_process(self,
windowed_value, # type: WindowedValue
restriction=None,
watermark_estimator_state=None,
additional_args=None,
additional_kwargs=None
):
# type: (...) -> Iterable[SplitResultResidual]
if not additional_args:
additional_args = []
if not additional_kwargs:
additional_kwargs = {}
self.context.set_element(windowed_value)
# Call for the process function for each window if has windowed side inputs
# or if the process accesses the window parameter. We can just call it once
# otherwise as none of the arguments are changing
residuals = []
if self.is_splittable:
with self.splitting_lock:
self.current_windowed_value = windowed_value
self.restriction = restriction
self.watermark_estimator_state = watermark_estimator_state
try:
if self.has_windowed_inputs and len(windowed_value.windows) > 1:
for i, w in enumerate(windowed_value.windows):
if not self._should_process_window_for_sdf(
windowed_value, additional_kwargs, i):
break
residual = self._invoke_process_per_window(
WindowedValue(
windowed_value.value, windowed_value.timestamp, (w, )),
additional_args,
additional_kwargs)
if residual:
residuals.append(residual)
else:
if self._should_process_window_for_sdf(windowed_value,
additional_kwargs):
residual = self._invoke_process_per_window(
windowed_value, additional_args, additional_kwargs)
if residual:
residuals.append(residual)
finally:
with self.splitting_lock:
self.current_windowed_value = None
self.restriction = None
self.watermark_estimator_state = None
self.current_window_index = None
self.threadsafe_restriction_tracker = None
self.threadsafe_watermark_estimator = None
elif self.has_windowed_inputs and len(windowed_value.windows) != 1:
for w in windowed_value.windows:
self._invoke_process_per_window(
WindowedValue(
windowed_value.value, windowed_value.timestamp, (w, )),
additional_args,
additional_kwargs)
else:
self._invoke_process_per_window(
windowed_value, additional_args, additional_kwargs)
return residuals
def _should_process_window_for_sdf(
self,
windowed_value, # type: WindowedValue
additional_kwargs,
window_index=None, # type: Optional[int]
):
restriction_tracker = self.invoke_create_tracker(self.restriction)
watermark_estimator = self.invoke_create_watermark_estimator(
self.watermark_estimator_state)
with self.splitting_lock:
if window_index:
self.current_window_index = window_index
if window_index == 0:
self.stop_window_index = len(windowed_value.windows)
if window_index == self.stop_window_index:
return False
self.threadsafe_restriction_tracker = ThreadsafeRestrictionTracker(
restriction_tracker)
self.threadsafe_watermark_estimator = (
ThreadsafeWatermarkEstimator(watermark_estimator))
restriction_tracker_param = (
self.signature.process_method.restriction_provider_arg_name)
if not restriction_tracker_param:
raise ValueError(
'DoFn is splittable but DoFn does not have a '
'RestrictionTrackerParam defined')
additional_kwargs[restriction_tracker_param] = (
RestrictionTrackerView(self.threadsafe_restriction_tracker))
watermark_param = (
self.signature.process_method.watermark_estimator_provider_arg_name)
# When the watermark_estimator is a NoOpWatermarkEstimator, the system
# will not add watermark_param into the DoFn param list.
if watermark_param is not None:
additional_kwargs[watermark_param] = self.threadsafe_watermark_estimator
return True
def _invoke_process_per_window(self,
windowed_value, # type: WindowedValue
additional_args,
additional_kwargs,
):
# type: (...) -> Optional[SplitResultResidual]
if self.has_windowed_inputs:
window, = windowed_value.windows
side_inputs = [si[window] for si in self.side_inputs]
side_inputs.extend(additional_args)
args_for_process, kwargs_for_process = util.insert_values_in_args(
self.args_for_process, self.kwargs_for_process,
side_inputs)
elif self.cache_globally_windowed_args:
# Attempt to cache additional args if all inputs are globally
# windowed inputs when processing the first element.
self.cache_globally_windowed_args = False
# Fill in sideInputs if they are globally windowed
global_window = GlobalWindow()
self.args_for_process, self.kwargs_for_process = (
util.insert_values_in_args(
self.args_for_process, self.kwargs_for_process,
[si[global_window] for si in self.side_inputs]))
args_for_process, kwargs_for_process = (
self.args_for_process, self.kwargs_for_process)
else:
args_for_process, kwargs_for_process = (
self.args_for_process, self.kwargs_for_process)
# Extract key in the case of a stateful DoFn. Note that in the case of a
# stateful DoFn, we set during __init__ self.has_windowed_inputs to be
# True. Therefore, windows will be exploded coming into this method, and
# we can rely on the window variable being set above.
if self.user_state_context or self.is_key_param_required:
try:
key, unused_value = windowed_value.value
except (TypeError, ValueError):
raise ValueError((
'Input value to a stateful DoFn or KeyParam must be a KV tuple; '
'instead, got \'%s\'.') % (windowed_value.value, ))
for i, p in self.placeholders:
if core.DoFn.ElementParam == p:
args_for_process[i] = windowed_value.value
elif core.DoFn.KeyParam == p:
args_for_process[i] = key
elif core.DoFn.WindowParam == p:
args_for_process[i] = window
elif core.DoFn.TimestampParam == p:
args_for_process[i] = windowed_value.timestamp
elif core.DoFn.PaneInfoParam == p:
args_for_process[i] = windowed_value.pane_info
elif isinstance(p, core.DoFn.StateParam):
assert self.user_state_context is not None
args_for_process[i] = (
self.user_state_context.get_state(p.state_spec, key, window))
elif isinstance(p, core.DoFn.TimerParam):
assert self.user_state_context is not None
args_for_process[i] = (
self.user_state_context.get_timer(
p.timer_spec,
key,
window,
windowed_value.timestamp,
windowed_value.pane_info))
elif core.DoFn.BundleFinalizerParam == p:
args_for_process[i] = self.bundle_finalizer_param
if additional_kwargs:
if kwargs_for_process is None:
kwargs_for_process = additional_kwargs
else:
for key in additional_kwargs:
kwargs_for_process[key] = additional_kwargs[key]
if kwargs_for_process:
self.output_processor.process_outputs(
windowed_value,
self.process_method(*args_for_process, **kwargs_for_process),
self.threadsafe_watermark_estimator)
else:
self.output_processor.process_outputs(
windowed_value,
self.process_method(*args_for_process),
self.threadsafe_watermark_estimator)
if self.is_splittable:
assert self.threadsafe_restriction_tracker is not None
self.threadsafe_restriction_tracker.check_done()
deferred_status = self.threadsafe_restriction_tracker.deferred_status()
if deferred_status:
deferred_restriction, deferred_timestamp = deferred_status
element = windowed_value.value
size = self.signature.get_restriction_provider().restriction_size(
element, deferred_restriction)
current_watermark = (
self.threadsafe_watermark_estimator.current_watermark())
estimator_state = (
self.threadsafe_watermark_estimator.get_estimator_state())
residual_value = ((element, (deferred_restriction, estimator_state)),
size)
return SplitResultResidual(
residual_value=windowed_value.with_value(residual_value),
current_watermark=current_watermark,
deferred_timestamp=deferred_timestamp)
return None
@staticmethod
def _try_split(fraction,
window_index, # type: Optional[int]
stop_window_index, # type: Optional[int]
windowed_value, # type: WindowedValue
restriction,
watermark_estimator_state,
restriction_provider, # type: RestrictionProvider
restriction_tracker, # type: RestrictionTracker
watermark_estimator, # type: WatermarkEstimator
):
# type: (...) -> Optional[Tuple[Iterable[SplitResultPrimary], Iterable[SplitResultResidual], Optional[int]]]
def compute_whole_window_split(to_index, from_index):
restriction_size = restriction_provider.restriction_size(
windowed_value, restriction)
# The primary and residual both share the same value only differing
# by the set of windows they are in.
value = ((windowed_value.value, (restriction, watermark_estimator_state)),
restriction_size)
primary_restriction = SplitResultPrimary(
primary_value=WindowedValue(
value,
windowed_value.timestamp,
windowed_value.windows[:to_index])) if to_index > 0 else None
# Don't report any updated watermarks for the residual since they have
residual_restriction = SplitResultResidual(
residual_value=WindowedValue(
value,
windowed_value.timestamp,
windowed_value.windows[from_index:stop_window_index]),
current_watermark=None,
deferred_timestamp=None) if from_index < stop_window_index else None
return (primary_restriction, residual_restriction)
primary_restrictions = []
residual_restrictions = []
window_observing = window_index is not None
# window then compute whether the split lies within the current window
# or a future window.
if window_observing and window_index != stop_window_index - 1:
progress = restriction_tracker.current_progress()
if not progress:
# Assume no work has been completed for the current window if progress
# is unavailable.
from apache_beam.io.iobase import RestrictionProgress
progress = RestrictionProgress(completed=0, remaining=1)
scaled_progress = PerWindowInvoker._scale_progress(
progress, window_index, stop_window_index)
# Compute the fraction of the remainder relative to the scaled progress.
# If the value is greater than or equal to progress.remaining_work then we
# should split at the closest window boundary.
fraction_of_remainder = scaled_progress.remaining_work * fraction
if fraction_of_remainder >= progress.remaining_work:
# The fraction is outside of the current window and hence we will
# split at the closest window boundary. Favor a split and return the
# last window if we would have rounded up to the end of the window
# based upon the fraction.
new_stop_window_index = min(
stop_window_index - 1,
window_index + max(
1,
int(
round((
progress.completed_work +
scaled_progress.remaining_work * fraction) /
progress.total_work))))
primary, residual = compute_whole_window_split(
new_stop_window_index, new_stop_window_index)
assert primary is not None
assert residual is not None
return ([primary], [residual], new_stop_window_index)
else:
# The fraction is within the current window being processed so compute
# the updated fraction based upon the number of windows being processed.
new_stop_window_index = window_index + 1
fraction = fraction_of_remainder / progress.remaining_work
# Attempt to split below, if we can't then we'll compute a split
# using only window boundaries
else:
# We aren't splitting within multiple windows so we don't change our
# stop index.
new_stop_window_index = stop_window_index
# Temporary workaround for [BEAM-7473]: get current_watermark before
# split, in case watermark gets advanced before getting split results.
# In worst case, current_watermark is always stale, which is ok.
current_watermark = (watermark_estimator.current_watermark())
current_estimator_state = (watermark_estimator.get_estimator_state())
split = restriction_tracker.try_split(fraction)
if split:
primary, residual = split
element = windowed_value.value
primary_size = restriction_provider.restriction_size(
windowed_value.value, primary)
residual_size = restriction_provider.restriction_size(
windowed_value.value, residual)
# We use the watermark estimator state for the original process call
# for the primary and the updated watermark estimator state for the
# residual for the split.
primary_split_value = ((element, (primary, watermark_estimator_state)),
primary_size)
residual_split_value = ((element, (residual, current_estimator_state)),
residual_size)
windows = (
windowed_value.windows[window_index],
) if window_observing else windowed_value.windows
primary_restrictions.append(
SplitResultPrimary(
primary_value=WindowedValue(
primary_split_value, windowed_value.timestamp, windows)))
residual_restrictions.append(
SplitResultResidual(
residual_value=WindowedValue(
residual_split_value, windowed_value.timestamp, windows),
current_watermark=current_watermark,
deferred_timestamp=None))
if window_observing:
assert new_stop_window_index == window_index + 1
primary, residual = compute_whole_window_split(
window_index, window_index + 1)
if primary:
primary_restrictions.append(primary)
if residual:
residual_restrictions.append(residual)
return (
primary_restrictions, residual_restrictions, new_stop_window_index)
elif new_stop_window_index and new_stop_window_index != stop_window_index:
# If we failed to split but have a new stop index then return a split
# at the window boundary.
primary, residual = compute_whole_window_split(
new_stop_window_index, new_stop_window_index)
assert primary is not None
assert residual is not None
return ([primary], [residual], new_stop_window_index)
else:
return None
def try_split(self, fraction):
# type: (...) -> Optional[Tuple[Iterable[SplitResultPrimary], Iterable[SplitResultResidual]]]
if not self.is_splittable:
return None
with self.splitting_lock:
if not self.threadsafe_restriction_tracker:
return None
# Make a local reference to member variables that change references during
# processing under lock before attempting to split so we have a consistent
# view of all the references.
result = PerWindowInvoker._try_split(
fraction,
self.current_window_index,
self.stop_window_index,
self.current_windowed_value,
self.restriction,
self.watermark_estimator_state,
self.signature.get_restriction_provider(),
self.threadsafe_restriction_tracker,
self.threadsafe_watermark_estimator)
if not result:
return None
residuals, primaries, self.stop_window_index = result
return (residuals, primaries)
@staticmethod
def _scale_progress(progress, window_index, stop_window_index):
# We scale progress based upon the amount of work we will do for one
# window and have it apply for all windows.
completed = window_index * progress.total_work + progress.completed_work
remaining = (
stop_window_index -
(window_index + 1)) * progress.total_work + progress.remaining_work
from apache_beam.io.iobase import RestrictionProgress
return RestrictionProgress(completed=completed, remaining=remaining)
def current_element_progress(self):
# type: () -> Optional[RestrictionProgress]
if not self.is_splittable:
return None
with self.splitting_lock:
current_window_index = self.current_window_index
stop_window_index = self.stop_window_index
threadsafe_restriction_tracker = self.threadsafe_restriction_tracker
if not threadsafe_restriction_tracker:
return None
progress = threadsafe_restriction_tracker.current_progress()
if not current_window_index or not progress:
return progress
# stop_window_index should always be set if current_window_index is set,
# it is an error otherwise.
assert stop_window_index
return PerWindowInvoker._scale_progress(
progress, current_window_index, stop_window_index)
class DoFnRunner:
def __init__(self,
fn, # type: core.DoFn
args,
kwargs,
side_inputs, # type: Iterable[sideinputs.SideInputMap]
windowing,
tagged_receivers, # type: Mapping[Optional[str], Receiver]
step_name=None, # type: Optional[str]
logging_context=None,
state=None,
scoped_metrics_container=None,
operation_name=None,
user_state_context=None # type: Optional[userstate.UserStateContext]
):
# Need to support multiple iterations.
side_inputs = list(side_inputs)
self.step_name = step_name
self.context = DoFnContext(step_name, state=state)
self.bundle_finalizer_param = DoFn.BundleFinalizerParam()
do_fn_signature = DoFnSignature(fn)
# Optimize for the common case.
main_receivers = tagged_receivers[None]
# TODO(BEAM-3937): Remove if block after output counter released.
if 'outputs_per_element_counter' in RuntimeValueProvider.experiments:
# TODO(BEAM-3955): Make step_name and operation_name less confused.
output_counter_name = (
CounterName('per-element-output-count', step_name=operation_name))
per_element_output_counter = state._counter_factory.get_counter(
output_counter_name, Counter.DATAFLOW_DISTRIBUTION).accumulator
else:
per_element_output_counter = None
output_processor = _OutputProcessor(
windowing.windowfn,
main_receivers,
tagged_receivers,
per_element_output_counter)
if do_fn_signature.is_stateful_dofn() and not user_state_context:
raise Exception(
'Requested execution of a stateful DoFn, but no user state context '
'is available. This likely means that the current runner does not '
'support the execution of stateful DoFns.')
self.do_fn_invoker = DoFnInvoker.create_invoker(
do_fn_signature,
output_processor,
self.context,
side_inputs,
args,
kwargs,
user_state_context=user_state_context,
bundle_finalizer_param=self.bundle_finalizer_param)
def process(self, windowed_value):
# type: (WindowedValue) -> Iterable[SplitResultResidual]
try:
return self.do_fn_invoker.invoke_process(windowed_value)
except BaseException as exn:
self._reraise_augmented(exn)
return []
def process_with_sized_restriction(self, windowed_value):
# type: (WindowedValue) -> Iterable[SplitResultResidual]
(element, (restriction, estimator_state)), _ = windowed_value.value
return self.do_fn_invoker.invoke_process(
windowed_value.with_value(element),
restriction=restriction,
watermark_estimator_state=estimator_state)
def try_split(self, fraction):
# type: (...) -> Optional[Tuple[Iterable[SplitResultPrimary], Iterable[SplitResultResidual]]]
assert isinstance(self.do_fn_invoker, PerWindowInvoker)
return self.do_fn_invoker.try_split(fraction)
def current_element_progress(self):
# type: () -> Optional[RestrictionProgress]
assert isinstance(self.do_fn_invoker, PerWindowInvoker)
return self.do_fn_invoker.current_element_progress()
def process_user_timer(self, timer_spec, key, window, timestamp, pane_info):
try:
self.do_fn_invoker.invoke_user_timer(
timer_spec, key, window, timestamp, pane_info)
except BaseException as exn:
self._reraise_augmented(exn)
def _invoke_bundle_method(self, bundle_method):
try:
self.context.set_element(None)
bundle_method()
except BaseException as exn:
self._reraise_augmented(exn)
def _invoke_lifecycle_method(self, lifecycle_method):
try:
self.context.set_element(None)
lifecycle_method()
except BaseException as exn:
self._reraise_augmented(exn)
def setup(self):
# type: () -> None
self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
def start(self):
# type: () -> None
self._invoke_bundle_method(self.do_fn_invoker.invoke_start_bundle)
def finish(self):
# type: () -> None
self._invoke_bundle_method(self.do_fn_invoker.invoke_finish_bundle)
def teardown(self):
# type: () -> None
self._invoke_lifecycle_method(self.do_fn_invoker.invoke_teardown)
def finalize(self):
# type: () -> None
self.bundle_finalizer_param.finalize_bundle()
def _reraise_augmented(self, exn):
if getattr(exn, '_tagged_with_step', False) or not self.step_name:
raise
step_annotation = " [while running '%s']" % self.step_name
# To emulate exception chaining (not available in Python 2).
try:
# Attempt to construct the same kind of exception
# with an augmented message.
new_exn = type(exn)(exn.args[0] + step_annotation, *exn.args[1:])
new_exn._tagged_with_step = True # Could raise attribute error.
except: # pylint: disable=bare-except
# If anything goes wrong, construct a RuntimeError whose message
# records the original exception's type and message.
new_exn = RuntimeError(
traceback.format_exception_only(type(exn), exn)[-1].strip() +
step_annotation)
new_exn._tagged_with_step = True
raise_with_traceback(new_exn)
class OutputProcessor(object):
def process_outputs(
self, windowed_input_element, results, watermark_estimator=None):
raise NotImplementedError
class _OutputProcessor(OutputProcessor):
def __init__(self,
window_fn,
main_receivers, tagged_receivers, per_element_output_counter):
self.window_fn = window_fn
self.main_receivers = main_receivers
self.tagged_receivers = tagged_receivers
self.per_element_output_counter = per_element_output_counter
def process_outputs(
self, windowed_input_element, results, watermark_estimator=None):
if results is None:
if (self.per_element_output_counter is not None and
self.per_element_output_counter.is_cythonized):
self.per_element_output_counter.add_input(0)
return
output_element_count = 0
for result in results:
output_element_count += 1
tag = None
if isinstance(result, TaggedOutput):
tag = result.tag
if not isinstance(tag, (str, unicode)):
raise TypeError('In %s, tag %s is not a string' % (self, tag))
result = result.value
if isinstance(result, WindowedValue):
windowed_value = result
if (windowed_input_element is not None and
len(windowed_input_element.windows) != 1):
windowed_value.windows *= len(windowed_input_element.windows)
elif isinstance(result, TimestampedValue):
assign_context = WindowFn.AssignContext(result.timestamp, result.value)
windowed_value = WindowedValue(
result.value,
result.timestamp,
self.window_fn.assign(assign_context))
if len(windowed_input_element.windows) != 1:
windowed_value.windows *= len(windowed_input_element.windows)
else:
windowed_value = windowed_input_element.with_value(result)
if watermark_estimator is not None:
watermark_estimator.observe_timestamp(windowed_value.timestamp)
if tag is None:
self.main_receivers.receive(windowed_value)
else:
self.tagged_receivers[tag].receive(windowed_value)
if (self.per_element_output_counter is not None and
self.per_element_output_counter.is_cythonized):
self.per_element_output_counter.add_input(output_element_count)
def start_bundle_outputs(self, results):
if results is None:
return
raise RuntimeError(
'Start Bundle should not output any elements but got %s' % results)
def finish_bundle_outputs(self, results):
if results is None:
return
for result in results:
tag = None
if isinstance(result, TaggedOutput):
tag = result.tag
if not isinstance(tag, (str, unicode)):
raise TypeError('In %s, tag %s is not a string' % (self, tag))
result = result.value
if isinstance(result, WindowedValue):
windowed_value = result
else:
        raise RuntimeError(
            'Finish Bundle should only output WindowedValue type but got '
            '%s' % type(result))
if tag is None:
self.main_receivers.receive(windowed_value)
else:
self.tagged_receivers[tag].receive(windowed_value)
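# --- Editor's sketch (hypothetical, not part of this module) -----------------
# How the tag routing above behaves, with plain lists standing in for
# receivers and a stub standing in for TaggedOutput: tagged results reach the
# receiver registered for their tag, everything else reaches the main output.
class _DemoTagged(object):
  def __init__(self, tag, value):
    self.tag = tag
    self.value = value

def _demo_route_outputs(results):
  main, tagged = [], {'errors': []}
  for result in results:
    if isinstance(result, _DemoTagged):
      tagged[result.tag].append(result.value)
    else:
      main.append(result)
  return main, tagged

# _demo_route_outputs([1, _DemoTagged('errors', 'oops'), 2])
# returns ([1, 2], {'errors': ['oops']})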
class _NoContext(WindowFn.AssignContext):
NO_VALUE = object()
def __init__(self, value, timestamp=NO_VALUE):
self.value = value
self._timestamp = timestamp
@property
def timestamp(self):
if self._timestamp is self.NO_VALUE:
raise ValueError('No timestamp in this context.')
else:
return self._timestamp
@property
def existing_windows(self):
raise ValueError('No existing_windows in this context.')
class DoFnState(object):
def __init__(self, counter_factory):
self.step_name = ''
self._counter_factory = counter_factory
def counter_for(self, aggregator):
return self._counter_factory.get_aggregator_counter(
self.step_name, aggregator)
class DoFnContext(object):
def __init__(self, label, element=None, state=None):
self.label = label
self.state = state
if element is not None:
self.set_element(element)
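    # Note: when `element` is None, `windowed_value` stays unset until
    # set_element() is called; the invoke helpers above call
    # context.set_element(None) before dispatching, so the guards in the
    # properties below compare against None instead of hitting a missing
    # attribute.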
def set_element(self, windowed_value):
self.windowed_value = windowed_value
@property
def element(self):
if self.windowed_value is None:
raise AttributeError('element not accessible in this context')
else:
return self.windowed_value.value
@property
def timestamp(self):
if self.windowed_value is None:
raise AttributeError('timestamp not accessible in this context')
else:
return self.windowed_value.timestamp
@property
def windows(self):
if self.windowed_value is None:
raise AttributeError('windows not accessible in this context')
else:
return self.windowed_value.windows
| true | true |
1c4a2669e7ad9b363505ce2e8e13f926ee994dbe | 55,879 | py | Python | vistrails/db/versions/v0_8_0/persistence/xml/auto_gen.py | celiafish/VisTrails | d8cb575b8b121941de190fe608003ad1427ef9f6 | [
"BSD-3-Clause"
] | 1 | 2015-05-11T16:46:49.000Z | 2015-05-11T16:46:49.000Z | vistrails/db/versions/v0_8_0/persistence/xml/auto_gen.py | celiafish/VisTrails | d8cb575b8b121941de190fe608003ad1427ef9f6 | [
"BSD-3-Clause"
] | null | null | null | vistrails/db/versions/v0_8_0/persistence/xml/auto_gen.py | celiafish/VisTrails | d8cb575b8b121941de190fe608003ad1427ef9f6 | [
"BSD-3-Clause"
] | null | null | null | ###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""generated automatically by auto_dao.py"""
from vistrails.core.system import get_elementtree_library
from xml_dao import XMLDAO
from vistrails.db.versions.v0_8_0.domain import *
ElementTree = get_elementtree_library()
class DBPortSpecXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'portSpec':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('type', None)
type = self.convertFromStr(data, 'str')
data = node.get('spec', None)
spec = self.convertFromStr(data, 'str')
obj = DBPortSpec(id=id,
name=name,
type=type,
spec=spec)
obj.is_dirty = False
return obj
def toXML(self, portSpec, node=None):
if node is None:
node = ElementTree.Element('portSpec')
# set attributes
node.set('id',self.convertToStr(portSpec.db_id, 'long'))
node.set('name',self.convertToStr(portSpec.db_name, 'str'))
node.set('type',self.convertToStr(portSpec.db_type, 'str'))
node.set('spec',self.convertToStr(portSpec.db_spec, 'str'))
return node
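# --- Editor's sketch (hypothetical, not part of the generated file) ----------
# Round-tripping a port spec through the DAO above. This particular DAO never
# consults the registry, so an empty dao list suffices here; the attribute
# values are made up for illustration.
def _demo_portspec_roundtrip():
    dao = DBPortSpecXMLDAOBase({})
    spec = DBPortSpec(id=1, name='value', type='input',
                      spec='(edu.utah.sci.vistrails.basic:String)')
    node = dao.toXML(spec)
    assert node.tag == 'portSpec'
    copy = dao.fromXML(node)
    # fromXML clears the dirty flag and restores every attribute.
    assert copy.db_name == spec.db_name and not copy.is_dirty
    return copy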
class DBModuleXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'module':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('cache', None)
cache = self.convertFromStr(data, 'int')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('package', None)
package = self.convertFromStr(data, 'str')
data = node.get('version', None)
version = self.convertFromStr(data, 'str')
location = None
functions = []
annotations = []
portSpecs = []
# read children
for child in node.getchildren():
if child.tag == 'location':
_data = self.getDao('location').fromXML(child)
location = _data
elif child.tag == 'function':
_data = self.getDao('function').fromXML(child)
functions.append(_data)
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.tag == 'portSpec':
_data = self.getDao('portSpec').fromXML(child)
portSpecs.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBModule(id=id,
cache=cache,
name=name,
package=package,
version=version,
location=location,
functions=functions,
annotations=annotations,
portSpecs=portSpecs)
obj.is_dirty = False
return obj
def toXML(self, module, node=None):
if node is None:
node = ElementTree.Element('module')
# set attributes
node.set('id',self.convertToStr(module.db_id, 'long'))
node.set('cache',self.convertToStr(module.db_cache, 'int'))
node.set('name',self.convertToStr(module.db_name, 'str'))
node.set('package',self.convertToStr(module.db_package, 'str'))
node.set('version',self.convertToStr(module.db_version, 'str'))
# set elements
location = module.db_location
if location is not None:
childNode = ElementTree.SubElement(node, 'location')
self.getDao('location').toXML(location, childNode)
functions = module.db_functions
for function in functions:
childNode = ElementTree.SubElement(node, 'function')
self.getDao('function').toXML(function, childNode)
annotations = module.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
portSpecs = module.db_portSpecs
for portSpec in portSpecs:
childNode = ElementTree.SubElement(node, 'portSpec')
self.getDao('portSpec').toXML(portSpec, childNode)
return node
class DBTagXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'tag':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
obj = DBTag(id=id,
name=name)
obj.is_dirty = False
return obj
def toXML(self, tag, node=None):
if node is None:
node = ElementTree.Element('tag')
# set attributes
node.set('id',self.convertToStr(tag.db_id, 'long'))
node.set('name',self.convertToStr(tag.db_name, 'str'))
return node
class DBPortXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'port':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('type', None)
type = self.convertFromStr(data, 'str')
data = node.get('moduleId', None)
moduleId = self.convertFromStr(data, 'long')
data = node.get('moduleName', None)
moduleName = self.convertFromStr(data, 'str')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('spec', None)
spec = self.convertFromStr(data, 'str')
obj = DBPort(id=id,
type=type,
moduleId=moduleId,
moduleName=moduleName,
name=name,
spec=spec)
obj.is_dirty = False
return obj
def toXML(self, port, node=None):
if node is None:
node = ElementTree.Element('port')
# set attributes
node.set('id',self.convertToStr(port.db_id, 'long'))
node.set('type',self.convertToStr(port.db_type, 'str'))
node.set('moduleId',self.convertToStr(port.db_moduleId, 'long'))
node.set('moduleName',self.convertToStr(port.db_moduleName, 'str'))
node.set('name',self.convertToStr(port.db_name, 'str'))
node.set('spec',self.convertToStr(port.db_spec, 'str'))
return node
class DBLogXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'log':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
workflow_execs = []
machines = []
# read children
for child in node.getchildren():
if child.tag == 'workflowExec':
_data = self.getDao('workflow_exec').fromXML(child)
workflow_execs.append(_data)
elif child.tag == 'machine':
_data = self.getDao('machine').fromXML(child)
machines.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBLog(id=id,
workflow_execs=workflow_execs,
machines=machines)
obj.is_dirty = False
return obj
def toXML(self, log, node=None):
if node is None:
node = ElementTree.Element('log')
# set attributes
node.set('id',self.convertToStr(log.db_id, 'long'))
# set elements
workflow_execs = log.db_workflow_execs
for workflow_exec in workflow_execs:
            childNode = ElementTree.SubElement(node, 'workflowExec')
self.getDao('workflow_exec').toXML(workflow_exec, childNode)
machines = log.db_machines
for machine in machines:
childNode = ElementTree.SubElement(node, 'machine')
self.getDao('machine').toXML(machine, childNode)
return node
class DBMachineXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'machine':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('os', None)
os = self.convertFromStr(data, 'str')
data = node.get('architecture', None)
architecture = self.convertFromStr(data, 'str')
data = node.get('processor', None)
processor = self.convertFromStr(data, 'str')
data = node.get('ram', None)
ram = self.convertFromStr(data, 'int')
obj = DBMachine(id=id,
name=name,
os=os,
architecture=architecture,
processor=processor,
ram=ram)
obj.is_dirty = False
return obj
def toXML(self, machine, node=None):
if node is None:
node = ElementTree.Element('machine')
# set attributes
node.set('id',self.convertToStr(machine.db_id, 'long'))
node.set('name',self.convertToStr(machine.db_name, 'str'))
node.set('os',self.convertToStr(machine.db_os, 'str'))
node.set('architecture',self.convertToStr(machine.db_architecture, 'str'))
node.set('processor',self.convertToStr(machine.db_processor, 'str'))
node.set('ram',self.convertToStr(machine.db_ram, 'int'))
return node
class DBAddXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'add':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('what', None)
what = self.convertFromStr(data, 'str')
data = node.get('objectId', None)
objectId = self.convertFromStr(data, 'long')
data = node.get('parentObjId', None)
parentObjId = self.convertFromStr(data, 'long')
data = node.get('parentObjType', None)
parentObjType = self.convertFromStr(data, 'str')
data = None
# read children
for child in node.getchildren():
if child.tag == 'module':
_data = self.getDao('module').fromXML(child)
data = _data
elif child.tag == 'location':
_data = self.getDao('location').fromXML(child)
data = _data
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
data = _data
elif child.tag == 'function':
_data = self.getDao('function').fromXML(child)
data = _data
elif child.tag == 'connection':
_data = self.getDao('connection').fromXML(child)
data = _data
elif child.tag == 'port':
_data = self.getDao('port').fromXML(child)
data = _data
elif child.tag == 'parameter':
_data = self.getDao('parameter').fromXML(child)
data = _data
elif child.tag == 'portSpec':
_data = self.getDao('portSpec').fromXML(child)
data = _data
elif child.tag == 'abstractionRef':
_data = self.getDao('abstractionRef').fromXML(child)
data = _data
elif child.tag == 'other':
_data = self.getDao('other').fromXML(child)
data = _data
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBAdd(data=data,
id=id,
what=what,
objectId=objectId,
parentObjId=parentObjId,
parentObjType=parentObjType)
obj.is_dirty = False
return obj
def toXML(self, add, node=None):
if node is None:
node = ElementTree.Element('add')
# set attributes
node.set('id',self.convertToStr(add.db_id, 'long'))
node.set('what',self.convertToStr(add.db_what, 'str'))
node.set('objectId',self.convertToStr(add.db_objectId, 'long'))
node.set('parentObjId',self.convertToStr(add.db_parentObjId, 'long'))
node.set('parentObjType',self.convertToStr(add.db_parentObjType, 'str'))
# set elements
data = add.db_data
if data is not None:
if data.vtType == 'module':
childNode = ElementTree.SubElement(node, 'module')
self.getDao('module').toXML(data, childNode)
elif data.vtType == 'location':
childNode = ElementTree.SubElement(node, 'location')
self.getDao('location').toXML(data, childNode)
elif data.vtType == 'annotation':
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(data, childNode)
elif data.vtType == 'function':
childNode = ElementTree.SubElement(node, 'function')
self.getDao('function').toXML(data, childNode)
elif data.vtType == 'connection':
childNode = ElementTree.SubElement(node, 'connection')
self.getDao('connection').toXML(data, childNode)
elif data.vtType == 'port':
childNode = ElementTree.SubElement(node, 'port')
self.getDao('port').toXML(data, childNode)
elif data.vtType == 'parameter':
childNode = ElementTree.SubElement(node, 'parameter')
self.getDao('parameter').toXML(data, childNode)
elif data.vtType == 'portSpec':
childNode = ElementTree.SubElement(node, 'portSpec')
self.getDao('portSpec').toXML(data, childNode)
elif data.vtType == 'abstractionRef':
childNode = ElementTree.SubElement(node, 'abstractionRef')
self.getDao('abstractionRef').toXML(data, childNode)
elif data.vtType == 'other':
childNode = ElementTree.SubElement(node, 'other')
self.getDao('other').toXML(data, childNode)
return node
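# --- Editor's sketch (hypothetical helper, not emitted by auto_dao.py) -------
# In the if/elif ladder above the child tag and the DAO key both equal
# data.vtType, so the same routing can be written without the ladder:
_ADD_CHILD_TAGS = ('module', 'location', 'annotation', 'function',
                   'connection', 'port', 'parameter', 'portSpec',
                   'abstractionRef', 'other')

def _demo_write_add_child(dao, node, data):
    """Equivalent child-element routing for DBAddXMLDAOBase.toXML."""
    if data is not None and data.vtType in _ADD_CHILD_TAGS:
        childNode = ElementTree.SubElement(node, data.vtType)
        dao.getDao(data.vtType).toXML(data, childNode)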
class DBOtherXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'other':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('key', None)
key = self.convertFromStr(data, 'str')
value = None
# read children
for child in node.getchildren():
if child.tag == 'value':
_data = self.convertFromStr(child.text,'')
value = _data
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBOther(id=id,
key=key,
value=value)
obj.is_dirty = False
return obj
def toXML(self, other, node=None):
if node is None:
node = ElementTree.Element('other')
# set attributes
node.set('id',self.convertToStr(other.db_id, 'long'))
node.set('key',self.convertToStr(other.db_key, 'str'))
# set elements
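        # Note: the 'value' child element read by fromXML above is not
        # re-emitted here, so DBOther round trips drop db_value as generated.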
return node
class DBLocationXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'location':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('x', None)
x = self.convertFromStr(data, 'float')
data = node.get('y', None)
y = self.convertFromStr(data, 'float')
obj = DBLocation(id=id,
x=x,
y=y)
obj.is_dirty = False
return obj
def toXML(self, location, node=None):
if node is None:
node = ElementTree.Element('location')
# set attributes
node.set('id',self.convertToStr(location.db_id, 'long'))
node.set('x',self.convertToStr(location.db_x, 'float'))
node.set('y',self.convertToStr(location.db_y, 'float'))
return node
class DBWorkflowExecXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'workflowExec':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('user', None)
user = self.convertFromStr(data, 'str')
data = node.get('ip', None)
ip = self.convertFromStr(data, 'str')
data = node.get('vtVersion', None)
vt_version = self.convertFromStr(data, 'str')
data = node.get('tsStart', None)
ts_start = self.convertFromStr(data, 'datetime')
data = node.get('tsEnd', None)
ts_end = self.convertFromStr(data, 'datetime')
data = node.get('parentId', None)
parent_id = self.convertFromStr(data, 'long')
data = node.get('parentType', None)
parent_type = self.convertFromStr(data, 'str')
data = node.get('parentVersion', None)
parent_version = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
module_execs = []
# read children
for child in node.getchildren():
if child.tag == 'moduleExec':
_data = self.getDao('module_exec').fromXML(child)
module_execs.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBWorkflowExec(id=id,
user=user,
ip=ip,
vt_version=vt_version,
ts_start=ts_start,
ts_end=ts_end,
parent_id=parent_id,
parent_type=parent_type,
parent_version=parent_version,
name=name,
module_execs=module_execs)
obj.is_dirty = False
return obj
def toXML(self, workflow_exec, node=None):
if node is None:
node = ElementTree.Element('workflowExec')
# set attributes
node.set('id',self.convertToStr(workflow_exec.db_id, 'long'))
node.set('user',self.convertToStr(workflow_exec.db_user, 'str'))
node.set('ip',self.convertToStr(workflow_exec.db_ip, 'str'))
node.set('vtVersion',self.convertToStr(workflow_exec.db_vt_version, 'str'))
node.set('tsStart',self.convertToStr(workflow_exec.db_ts_start, 'datetime'))
node.set('tsEnd',self.convertToStr(workflow_exec.db_ts_end, 'datetime'))
node.set('parentId',self.convertToStr(workflow_exec.db_parent_id, 'long'))
node.set('parentType',self.convertToStr(workflow_exec.db_parent_type, 'str'))
node.set('parentVersion',self.convertToStr(workflow_exec.db_parent_version, 'long'))
node.set('name',self.convertToStr(workflow_exec.db_name, 'str'))
# set elements
module_execs = workflow_exec.db_module_execs
for module_exec in module_execs:
            childNode = ElementTree.SubElement(node, 'moduleExec')
self.getDao('module_exec').toXML(module_exec, childNode)
return node
class DBFunctionXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'function':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('pos', None)
pos = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
parameters = []
# read children
for child in node.getchildren():
if child.tag == 'parameter':
_data = self.getDao('parameter').fromXML(child)
parameters.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBFunction(id=id,
pos=pos,
name=name,
parameters=parameters)
obj.is_dirty = False
return obj
def toXML(self, function, node=None):
if node is None:
node = ElementTree.Element('function')
# set attributes
node.set('id',self.convertToStr(function.db_id, 'long'))
node.set('pos',self.convertToStr(function.db_pos, 'long'))
node.set('name',self.convertToStr(function.db_name, 'str'))
# set elements
parameters = function.db_parameters
for parameter in parameters:
childNode = ElementTree.SubElement(node, 'parameter')
self.getDao('parameter').toXML(parameter, childNode)
return node
class DBAbstractionXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'abstraction':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
actions = []
tags = []
# read children
for child in node.getchildren():
if child.tag == 'action':
_data = self.getDao('action').fromXML(child)
actions.append(_data)
elif child.tag == 'tag':
_data = self.getDao('tag').fromXML(child)
tags.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBAbstraction(id=id,
name=name,
actions=actions,
tags=tags)
obj.is_dirty = False
return obj
def toXML(self, abstraction, node=None):
if node is None:
node = ElementTree.Element('abstraction')
# set attributes
node.set('id',self.convertToStr(abstraction.db_id, 'long'))
node.set('name',self.convertToStr(abstraction.db_name, 'str'))
# set elements
actions = abstraction.db_actions
for action in actions:
childNode = ElementTree.SubElement(node, 'action')
self.getDao('action').toXML(action, childNode)
tags = abstraction.db_tags
for tag in tags:
childNode = ElementTree.SubElement(node, 'tag')
self.getDao('tag').toXML(tag, childNode)
return node
class DBWorkflowXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'workflow':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
connections = []
annotations = []
others = []
modules = []
# read children
for child in node.getchildren():
if child.tag == 'connection':
_data = self.getDao('connection').fromXML(child)
connections.append(_data)
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.tag == 'other':
_data = self.getDao('other').fromXML(child)
others.append(_data)
elif child.tag == 'module':
_data = self.getDao('module').fromXML(child)
modules.append(_data)
elif child.tag == 'abstractionRef':
_data = self.getDao('abstractionRef').fromXML(child)
modules.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBWorkflow(modules=modules,
id=id,
name=name,
connections=connections,
annotations=annotations,
others=others)
obj.is_dirty = False
return obj
def toXML(self, workflow, node=None):
if node is None:
node = ElementTree.Element('workflow')
# set attributes
node.set('id',self.convertToStr(workflow.db_id, 'long'))
node.set('name',self.convertToStr(workflow.db_name, 'str'))
# set elements
connections = workflow.db_connections
for connection in connections:
childNode = ElementTree.SubElement(node, 'connection')
self.getDao('connection').toXML(connection, childNode)
annotations = workflow.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
others = workflow.db_others
for other in others:
childNode = ElementTree.SubElement(node, 'other')
self.getDao('other').toXML(other, childNode)
modules = workflow.db_modules
for module in modules:
if module.vtType == 'module':
childNode = ElementTree.SubElement(node, 'module')
self.getDao('module').toXML(module, childNode)
elif module.vtType == 'abstractionRef':
childNode = ElementTree.SubElement(node, 'abstractionRef')
self.getDao('abstractionRef').toXML(module, childNode)
return node
class DBAbstractionRefXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'abstractionRef':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('cache', None)
cache = self.convertFromStr(data, 'int')
data = node.get('abstractionId', None)
abstraction_id = self.convertFromStr(data, 'long')
data = node.get('version', None)
version = self.convertFromStr(data, 'long')
location = None
functions = []
annotations = []
# read children
for child in node.getchildren():
if child.tag == 'location':
_data = self.getDao('location').fromXML(child)
location = _data
elif child.tag == 'function':
_data = self.getDao('function').fromXML(child)
functions.append(_data)
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBAbstractionRef(id=id,
name=name,
cache=cache,
abstraction_id=abstraction_id,
version=version,
location=location,
functions=functions,
annotations=annotations)
obj.is_dirty = False
return obj
def toXML(self, abstractionRef, node=None):
if node is None:
node = ElementTree.Element('abstractionRef')
# set attributes
node.set('id',self.convertToStr(abstractionRef.db_id, 'long'))
node.set('name',self.convertToStr(abstractionRef.db_name, 'str'))
node.set('cache',self.convertToStr(abstractionRef.db_cache, 'int'))
node.set('abstractionId',self.convertToStr(abstractionRef.db_abstraction_id, 'long'))
node.set('version',self.convertToStr(abstractionRef.db_version, 'long'))
# set elements
location = abstractionRef.db_location
if location is not None:
childNode = ElementTree.SubElement(node, 'location')
self.getDao('location').toXML(location, childNode)
functions = abstractionRef.db_functions
for function in functions:
childNode = ElementTree.SubElement(node, 'function')
self.getDao('function').toXML(function, childNode)
annotations = abstractionRef.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
return node
class DBAnnotationXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'annotation':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('key', None)
key = self.convertFromStr(data, 'str')
data = node.get('value', None)
value = self.convertFromStr(data, 'str')
obj = DBAnnotation(id=id,
key=key,
value=value)
obj.is_dirty = False
return obj
def toXML(self, annotation, node=None):
if node is None:
node = ElementTree.Element('annotation')
# set attributes
node.set('id',self.convertToStr(annotation.db_id, 'long'))
node.set('key',self.convertToStr(annotation.db_key, 'str'))
node.set('value',self.convertToStr(annotation.db_value, 'str'))
return node
class DBChangeXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'change':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('what', None)
what = self.convertFromStr(data, 'str')
data = node.get('oldObjId', None)
oldObjId = self.convertFromStr(data, 'long')
data = node.get('newObjId', None)
newObjId = self.convertFromStr(data, 'long')
data = node.get('parentObjId', None)
parentObjId = self.convertFromStr(data, 'long')
data = node.get('parentObjType', None)
parentObjType = self.convertFromStr(data, 'str')
data = None
# read children
for child in node.getchildren():
if child.tag == 'module':
_data = self.getDao('module').fromXML(child)
data = _data
elif child.tag == 'location':
_data = self.getDao('location').fromXML(child)
data = _data
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
data = _data
elif child.tag == 'function':
_data = self.getDao('function').fromXML(child)
data = _data
elif child.tag == 'connection':
_data = self.getDao('connection').fromXML(child)
data = _data
elif child.tag == 'port':
_data = self.getDao('port').fromXML(child)
data = _data
elif child.tag == 'parameter':
_data = self.getDao('parameter').fromXML(child)
data = _data
elif child.tag == 'portSpec':
_data = self.getDao('portSpec').fromXML(child)
data = _data
elif child.tag == 'abstractionRef':
_data = self.getDao('abstractionRef').fromXML(child)
data = _data
elif child.tag == 'other':
_data = self.getDao('other').fromXML(child)
data = _data
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBChange(data=data,
id=id,
what=what,
oldObjId=oldObjId,
newObjId=newObjId,
parentObjId=parentObjId,
parentObjType=parentObjType)
obj.is_dirty = False
return obj
def toXML(self, change, node=None):
if node is None:
node = ElementTree.Element('change')
# set attributes
node.set('id',self.convertToStr(change.db_id, 'long'))
node.set('what',self.convertToStr(change.db_what, 'str'))
node.set('oldObjId',self.convertToStr(change.db_oldObjId, 'long'))
node.set('newObjId',self.convertToStr(change.db_newObjId, 'long'))
node.set('parentObjId',self.convertToStr(change.db_parentObjId, 'long'))
node.set('parentObjType',self.convertToStr(change.db_parentObjType, 'str'))
# set elements
data = change.db_data
if data is not None:
if data.vtType == 'module':
childNode = ElementTree.SubElement(node, 'module')
self.getDao('module').toXML(data, childNode)
elif data.vtType == 'location':
childNode = ElementTree.SubElement(node, 'location')
self.getDao('location').toXML(data, childNode)
elif data.vtType == 'annotation':
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(data, childNode)
elif data.vtType == 'function':
childNode = ElementTree.SubElement(node, 'function')
self.getDao('function').toXML(data, childNode)
elif data.vtType == 'connection':
childNode = ElementTree.SubElement(node, 'connection')
self.getDao('connection').toXML(data, childNode)
elif data.vtType == 'port':
childNode = ElementTree.SubElement(node, 'port')
self.getDao('port').toXML(data, childNode)
elif data.vtType == 'parameter':
childNode = ElementTree.SubElement(node, 'parameter')
self.getDao('parameter').toXML(data, childNode)
elif data.vtType == 'portSpec':
childNode = ElementTree.SubElement(node, 'portSpec')
self.getDao('portSpec').toXML(data, childNode)
elif data.vtType == 'abstractionRef':
childNode = ElementTree.SubElement(node, 'abstractionRef')
self.getDao('abstractionRef').toXML(data, childNode)
elif data.vtType == 'other':
childNode = ElementTree.SubElement(node, 'other')
self.getDao('other').toXML(data, childNode)
return node
class DBParameterXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'parameter':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('pos', None)
pos = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('type', None)
type = self.convertFromStr(data, 'str')
data = node.get('val', None)
val = self.convertFromStr(data, 'str')
data = node.get('alias', None)
alias = self.convertFromStr(data, 'str')
obj = DBParameter(id=id,
pos=pos,
name=name,
type=type,
val=val,
alias=alias)
obj.is_dirty = False
return obj
def toXML(self, parameter, node=None):
if node is None:
node = ElementTree.Element('parameter')
# set attributes
node.set('id',self.convertToStr(parameter.db_id, 'long'))
node.set('pos',self.convertToStr(parameter.db_pos, 'long'))
node.set('name',self.convertToStr(parameter.db_name, 'str'))
node.set('type',self.convertToStr(parameter.db_type, 'str'))
node.set('val',self.convertToStr(parameter.db_val, 'str'))
node.set('alias',self.convertToStr(parameter.db_alias, 'str'))
return node
class DBConnectionXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'connection':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
ports = []
# read children
for child in node.getchildren():
if child.tag == 'port':
_data = self.getDao('port').fromXML(child)
ports.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBConnection(id=id,
ports=ports)
obj.is_dirty = False
return obj
def toXML(self, connection, node=None):
if node is None:
node = ElementTree.Element('connection')
# set attributes
node.set('id',self.convertToStr(connection.db_id, 'long'))
# set elements
ports = connection.db_ports
for port in ports:
childNode = ElementTree.SubElement(node, 'port')
self.getDao('port').toXML(port, childNode)
return node
class DBActionXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'action':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('prevId', None)
prevId = self.convertFromStr(data, 'long')
data = node.get('date', None)
date = self.convertFromStr(data, 'datetime')
data = node.get('session', None)
session = self.convertFromStr(data, 'str')
data = node.get('user', None)
user = self.convertFromStr(data, 'str')
data = node.get('prune', None)
prune = self.convertFromStr(data, 'int')
annotations = []
operations = []
# read children
for child in node.getchildren():
if child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.tag == 'add':
_data = self.getDao('add').fromXML(child)
operations.append(_data)
elif child.tag == 'delete':
_data = self.getDao('delete').fromXML(child)
operations.append(_data)
elif child.tag == 'change':
_data = self.getDao('change').fromXML(child)
operations.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBAction(operations=operations,
id=id,
prevId=prevId,
date=date,
session=session,
user=user,
prune=prune,
annotations=annotations)
obj.is_dirty = False
return obj
def toXML(self, action, node=None):
if node is None:
node = ElementTree.Element('action')
# set attributes
node.set('id',self.convertToStr(action.db_id, 'long'))
node.set('prevId',self.convertToStr(action.db_prevId, 'long'))
node.set('date',self.convertToStr(action.db_date, 'datetime'))
node.set('session',self.convertToStr(action.db_session, 'str'))
node.set('user',self.convertToStr(action.db_user, 'str'))
node.set('prune',self.convertToStr(action.db_prune, 'int'))
# set elements
annotations = action.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
operations = action.db_operations
for operation in operations:
if operation.vtType == 'add':
childNode = ElementTree.SubElement(node, 'add')
self.getDao('add').toXML(operation, childNode)
elif operation.vtType == 'delete':
childNode = ElementTree.SubElement(node, 'delete')
self.getDao('delete').toXML(operation, childNode)
elif operation.vtType == 'change':
childNode = ElementTree.SubElement(node, 'change')
self.getDao('change').toXML(operation, childNode)
return node
class DBDeleteXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'delete':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('what', None)
what = self.convertFromStr(data, 'str')
data = node.get('objectId', None)
objectId = self.convertFromStr(data, 'long')
data = node.get('parentObjId', None)
parentObjId = self.convertFromStr(data, 'long')
data = node.get('parentObjType', None)
parentObjType = self.convertFromStr(data, 'str')
obj = DBDelete(id=id,
what=what,
objectId=objectId,
parentObjId=parentObjId,
parentObjType=parentObjType)
obj.is_dirty = False
return obj
def toXML(self, delete, node=None):
if node is None:
node = ElementTree.Element('delete')
# set attributes
node.set('id',self.convertToStr(delete.db_id, 'long'))
node.set('what',self.convertToStr(delete.db_what, 'str'))
node.set('objectId',self.convertToStr(delete.db_objectId, 'long'))
node.set('parentObjId',self.convertToStr(delete.db_parentObjId, 'long'))
node.set('parentObjType',self.convertToStr(delete.db_parentObjType, 'str'))
return node
class DBVistrailXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'vistrail':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('version', None)
version = self.convertFromStr(data, 'str')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('dbHost', None)
dbHost = self.convertFromStr(data, 'str')
data = node.get('dbPort', None)
dbPort = self.convertFromStr(data, 'int')
data = node.get('dbName', None)
dbName = self.convertFromStr(data, 'str')
actions = []
tags = []
abstractions = []
# read children
for child in node.getchildren():
if child.tag == 'action':
_data = self.getDao('action').fromXML(child)
actions.append(_data)
elif child.tag == 'tag':
_data = self.getDao('tag').fromXML(child)
tags.append(_data)
elif child.tag == 'abstraction':
_data = self.getDao('abstraction').fromXML(child)
abstractions.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBVistrail(id=id,
version=version,
name=name,
dbHost=dbHost,
dbPort=dbPort,
dbName=dbName,
actions=actions,
tags=tags,
abstractions=abstractions)
obj.is_dirty = False
return obj
def toXML(self, vistrail, node=None):
if node is None:
node = ElementTree.Element('vistrail')
# set attributes
node.set('id',self.convertToStr(vistrail.db_id, 'long'))
node.set('version',self.convertToStr(vistrail.db_version, 'str'))
node.set('name',self.convertToStr(vistrail.db_name, 'str'))
node.set('dbHost',self.convertToStr(vistrail.db_dbHost, 'str'))
node.set('dbPort',self.convertToStr(vistrail.db_dbPort, 'int'))
node.set('dbName',self.convertToStr(vistrail.db_dbName, 'str'))
# set elements
actions = vistrail.db_actions
for action in actions:
childNode = ElementTree.SubElement(node, 'action')
self.getDao('action').toXML(action, childNode)
tags = vistrail.db_tags
for tag in tags:
childNode = ElementTree.SubElement(node, 'tag')
self.getDao('tag').toXML(tag, childNode)
abstractions = vistrail.db_abstractions
for abstraction in abstractions:
childNode = ElementTree.SubElement(node, 'abstraction')
self.getDao('abstraction').toXML(abstraction, childNode)
return node
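# --- Editor's sketch (hypothetical, not part of the generated file) ----------
# Serializing a whole vistrail to bytes. XMLDAOListBase (defined at the end of
# this module) is only looked up at call time, so the forward reference is
# safe; the DBVistrail field values below are illustrative.
def _demo_vistrail_to_xml():
    daos = XMLDAOListBase()
    vistrail = DBVistrail(id=1, version='0.8.0', name='example',
                          dbHost='localhost', dbPort=3306, dbName='vistrails',
                          actions=[], tags=[], abstractions=[])
    return ElementTree.tostring(daos['vistrail'].toXML(vistrail))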
class DBModuleExecXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'moduleExec':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('tsStart', None)
ts_start = self.convertFromStr(data, 'datetime')
data = node.get('tsEnd', None)
ts_end = self.convertFromStr(data, 'datetime')
data = node.get('moduleId', None)
module_id = self.convertFromStr(data, 'long')
data = node.get('moduleName', None)
module_name = self.convertFromStr(data, 'str')
annotations = []
# read children
for child in node.getchildren():
if child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBModuleExec(id=id,
ts_start=ts_start,
ts_end=ts_end,
module_id=module_id,
module_name=module_name,
annotations=annotations)
obj.is_dirty = False
return obj
def toXML(self, module_exec, node=None):
if node is None:
node = ElementTree.Element('moduleExec')
# set attributes
node.set('id',self.convertToStr(module_exec.db_id, 'long'))
node.set('tsStart',self.convertToStr(module_exec.db_ts_start, 'datetime'))
node.set('tsEnd',self.convertToStr(module_exec.db_ts_end, 'datetime'))
node.set('moduleId',self.convertToStr(module_exec.db_module_id, 'long'))
node.set('moduleName',self.convertToStr(module_exec.db_module_name, 'str'))
# set elements
annotations = module_exec.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
return node
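# --- Editor's note (the XMLDAO base class lives in xml_dao and is not shown
# here) ------------------------------------------------------------------------
# Every DAO above leans on convertFromStr/convertToStr inherited from XMLDAO.
# A plausible minimal shape, for orientation only and not the actual
# implementation:
#
#     def convertFromStr(self, value, type):
#         if value is None:
#             return None
#         if type in ('str', ''):
#             return str(value)
#         if type == 'long':
#             return long(value)
#         ...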
"""generated automatically by auto_dao.py"""
class XMLDAOListBase(dict):
def __init__(self, daos=None):
if daos is not None:
dict.update(self, daos)
if 'portSpec' not in self:
self['portSpec'] = DBPortSpecXMLDAOBase(self)
if 'module' not in self:
self['module'] = DBModuleXMLDAOBase(self)
if 'tag' not in self:
self['tag'] = DBTagXMLDAOBase(self)
if 'port' not in self:
self['port'] = DBPortXMLDAOBase(self)
if 'log' not in self:
self['log'] = DBLogXMLDAOBase(self)
if 'machine' not in self:
self['machine'] = DBMachineXMLDAOBase(self)
if 'add' not in self:
self['add'] = DBAddXMLDAOBase(self)
if 'other' not in self:
self['other'] = DBOtherXMLDAOBase(self)
if 'location' not in self:
self['location'] = DBLocationXMLDAOBase(self)
if 'workflow_exec' not in self:
self['workflow_exec'] = DBWorkflowExecXMLDAOBase(self)
if 'function' not in self:
self['function'] = DBFunctionXMLDAOBase(self)
if 'abstraction' not in self:
self['abstraction'] = DBAbstractionXMLDAOBase(self)
if 'workflow' not in self:
self['workflow'] = DBWorkflowXMLDAOBase(self)
if 'abstractionRef' not in self:
self['abstractionRef'] = DBAbstractionRefXMLDAOBase(self)
if 'annotation' not in self:
self['annotation'] = DBAnnotationXMLDAOBase(self)
if 'change' not in self:
self['change'] = DBChangeXMLDAOBase(self)
if 'parameter' not in self:
self['parameter'] = DBParameterXMLDAOBase(self)
if 'connection' not in self:
self['connection'] = DBConnectionXMLDAOBase(self)
if 'action' not in self:
self['action'] = DBActionXMLDAOBase(self)
if 'delete' not in self:
self['delete'] = DBDeleteXMLDAOBase(self)
if 'vistrail' not in self:
self['vistrail'] = DBVistrailXMLDAOBase(self)
if 'module_exec' not in self:
self['module_exec'] = DBModuleExecXMLDAOBase(self)
| 36.474543 | 93 | 0.546824 |
"""generated automatically by auto_dao.py"""
from vistrails.core.system import get_elementtree_library
from xml_dao import XMLDAO
from vistrails.db.versions.v0_8_0.domain import *
ElementTree = get_elementtree_library()
class DBPortSpecXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'portSpec':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('type', None)
type = self.convertFromStr(data, 'str')
data = node.get('spec', None)
spec = self.convertFromStr(data, 'str')
obj = DBPortSpec(id=id,
name=name,
type=type,
spec=spec)
obj.is_dirty = False
return obj
def toXML(self, portSpec, node=None):
if node is None:
node = ElementTree.Element('portSpec')
node.set('id',self.convertToStr(portSpec.db_id, 'long'))
node.set('name',self.convertToStr(portSpec.db_name, 'str'))
node.set('type',self.convertToStr(portSpec.db_type, 'str'))
node.set('spec',self.convertToStr(portSpec.db_spec, 'str'))
return node
class DBModuleXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'module':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('cache', None)
cache = self.convertFromStr(data, 'int')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('package', None)
package = self.convertFromStr(data, 'str')
data = node.get('version', None)
version = self.convertFromStr(data, 'str')
location = None
functions = []
annotations = []
portSpecs = []
for child in node.getchildren():
if child.tag == 'location':
_data = self.getDao('location').fromXML(child)
location = _data
elif child.tag == 'function':
_data = self.getDao('function').fromXML(child)
functions.append(_data)
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.tag == 'portSpec':
_data = self.getDao('portSpec').fromXML(child)
portSpecs.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBModule(id=id,
cache=cache,
name=name,
package=package,
version=version,
location=location,
functions=functions,
annotations=annotations,
portSpecs=portSpecs)
obj.is_dirty = False
return obj
def toXML(self, module, node=None):
if node is None:
node = ElementTree.Element('module')
node.set('id',self.convertToStr(module.db_id, 'long'))
node.set('cache',self.convertToStr(module.db_cache, 'int'))
node.set('name',self.convertToStr(module.db_name, 'str'))
node.set('package',self.convertToStr(module.db_package, 'str'))
node.set('version',self.convertToStr(module.db_version, 'str'))
location = module.db_location
if location is not None:
childNode = ElementTree.SubElement(node, 'location')
self.getDao('location').toXML(location, childNode)
functions = module.db_functions
for function in functions:
childNode = ElementTree.SubElement(node, 'function')
self.getDao('function').toXML(function, childNode)
annotations = module.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
portSpecs = module.db_portSpecs
for portSpec in portSpecs:
childNode = ElementTree.SubElement(node, 'portSpec')
self.getDao('portSpec').toXML(portSpec, childNode)
return node
class DBTagXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'tag':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
obj = DBTag(id=id,
name=name)
obj.is_dirty = False
return obj
def toXML(self, tag, node=None):
if node is None:
node = ElementTree.Element('tag')
node.set('id',self.convertToStr(tag.db_id, 'long'))
node.set('name',self.convertToStr(tag.db_name, 'str'))
return node
class DBPortXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'port':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('type', None)
type = self.convertFromStr(data, 'str')
data = node.get('moduleId', None)
moduleId = self.convertFromStr(data, 'long')
data = node.get('moduleName', None)
moduleName = self.convertFromStr(data, 'str')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('spec', None)
spec = self.convertFromStr(data, 'str')
obj = DBPort(id=id,
type=type,
moduleId=moduleId,
moduleName=moduleName,
name=name,
spec=spec)
obj.is_dirty = False
return obj
def toXML(self, port, node=None):
if node is None:
node = ElementTree.Element('port')
node.set('id',self.convertToStr(port.db_id, 'long'))
node.set('type',self.convertToStr(port.db_type, 'str'))
node.set('moduleId',self.convertToStr(port.db_moduleId, 'long'))
node.set('moduleName',self.convertToStr(port.db_moduleName, 'str'))
node.set('name',self.convertToStr(port.db_name, 'str'))
node.set('spec',self.convertToStr(port.db_spec, 'str'))
return node
class DBLogXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'log':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
workflow_execs = []
machines = []
for child in node.getchildren():
if child.tag == 'workflowExec':
_data = self.getDao('workflow_exec').fromXML(child)
workflow_execs.append(_data)
elif child.tag == 'machine':
_data = self.getDao('machine').fromXML(child)
machines.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBLog(id=id,
workflow_execs=workflow_execs,
machines=machines)
obj.is_dirty = False
return obj
def toXML(self, log, node=None):
if node is None:
node = ElementTree.Element('log')
node.set('id',self.convertToStr(log.db_id, 'long'))
workflow_execs = log.db_workflow_execs
for workflow_exec in workflow_execs:
            childNode = ElementTree.SubElement(node, 'workflowExec')
self.getDao('workflow_exec').toXML(workflow_exec, childNode)
machines = log.db_machines
for machine in machines:
childNode = ElementTree.SubElement(node, 'machine')
self.getDao('machine').toXML(machine, childNode)
return node
class DBMachineXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'machine':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('os', None)
os = self.convertFromStr(data, 'str')
data = node.get('architecture', None)
architecture = self.convertFromStr(data, 'str')
data = node.get('processor', None)
processor = self.convertFromStr(data, 'str')
data = node.get('ram', None)
ram = self.convertFromStr(data, 'int')
obj = DBMachine(id=id,
name=name,
os=os,
architecture=architecture,
processor=processor,
ram=ram)
obj.is_dirty = False
return obj
def toXML(self, machine, node=None):
if node is None:
node = ElementTree.Element('machine')
node.set('id',self.convertToStr(machine.db_id, 'long'))
node.set('name',self.convertToStr(machine.db_name, 'str'))
node.set('os',self.convertToStr(machine.db_os, 'str'))
node.set('architecture',self.convertToStr(machine.db_architecture, 'str'))
node.set('processor',self.convertToStr(machine.db_processor, 'str'))
node.set('ram',self.convertToStr(machine.db_ram, 'int'))
return node
class DBAddXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'add':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('what', None)
what = self.convertFromStr(data, 'str')
data = node.get('objectId', None)
objectId = self.convertFromStr(data, 'long')
data = node.get('parentObjId', None)
parentObjId = self.convertFromStr(data, 'long')
data = node.get('parentObjType', None)
parentObjType = self.convertFromStr(data, 'str')
data = None
for child in node.getchildren():
if child.tag == 'module':
_data = self.getDao('module').fromXML(child)
data = _data
elif child.tag == 'location':
_data = self.getDao('location').fromXML(child)
data = _data
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
data = _data
elif child.tag == 'function':
_data = self.getDao('function').fromXML(child)
data = _data
elif child.tag == 'connection':
_data = self.getDao('connection').fromXML(child)
data = _data
elif child.tag == 'port':
_data = self.getDao('port').fromXML(child)
data = _data
elif child.tag == 'parameter':
_data = self.getDao('parameter').fromXML(child)
data = _data
elif child.tag == 'portSpec':
_data = self.getDao('portSpec').fromXML(child)
data = _data
elif child.tag == 'abstractionRef':
_data = self.getDao('abstractionRef').fromXML(child)
data = _data
elif child.tag == 'other':
_data = self.getDao('other').fromXML(child)
data = _data
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBAdd(data=data,
id=id,
what=what,
objectId=objectId,
parentObjId=parentObjId,
parentObjType=parentObjType)
obj.is_dirty = False
return obj
def toXML(self, add, node=None):
if node is None:
node = ElementTree.Element('add')
node.set('id',self.convertToStr(add.db_id, 'long'))
node.set('what',self.convertToStr(add.db_what, 'str'))
node.set('objectId',self.convertToStr(add.db_objectId, 'long'))
node.set('parentObjId',self.convertToStr(add.db_parentObjId, 'long'))
node.set('parentObjType',self.convertToStr(add.db_parentObjType, 'str'))
data = add.db_data
if data is not None:
if data.vtType == 'module':
childNode = ElementTree.SubElement(node, 'module')
self.getDao('module').toXML(data, childNode)
elif data.vtType == 'location':
childNode = ElementTree.SubElement(node, 'location')
self.getDao('location').toXML(data, childNode)
elif data.vtType == 'annotation':
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(data, childNode)
elif data.vtType == 'function':
childNode = ElementTree.SubElement(node, 'function')
self.getDao('function').toXML(data, childNode)
elif data.vtType == 'connection':
childNode = ElementTree.SubElement(node, 'connection')
self.getDao('connection').toXML(data, childNode)
elif data.vtType == 'port':
childNode = ElementTree.SubElement(node, 'port')
self.getDao('port').toXML(data, childNode)
elif data.vtType == 'parameter':
childNode = ElementTree.SubElement(node, 'parameter')
self.getDao('parameter').toXML(data, childNode)
elif data.vtType == 'portSpec':
childNode = ElementTree.SubElement(node, 'portSpec')
self.getDao('portSpec').toXML(data, childNode)
elif data.vtType == 'abstractionRef':
childNode = ElementTree.SubElement(node, 'abstractionRef')
self.getDao('abstractionRef').toXML(data, childNode)
elif data.vtType == 'other':
childNode = ElementTree.SubElement(node, 'other')
self.getDao('other').toXML(data, childNode)
return node
class DBOtherXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'other':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('key', None)
key = self.convertFromStr(data, 'str')
value = None
for child in node.getchildren():
if child.tag == 'value':
_data = self.convertFromStr(child.text,'')
value = _data
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBOther(id=id,
key=key,
value=value)
obj.is_dirty = False
return obj
def toXML(self, other, node=None):
if node is None:
node = ElementTree.Element('other')
node.set('id',self.convertToStr(other.db_id, 'long'))
        node.set('key',self.convertToStr(other.db_key, 'str'))
        # Also serialize the 'value' child element; without it the fromXML
        # above can never recover db_value on a round trip.
        if other.db_value is not None:
            childNode = ElementTree.SubElement(node, 'value')
            childNode.text = self.convertToStr(other.db_value, 'str')
        return node
class DBLocationXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'location':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('x', None)
x = self.convertFromStr(data, 'float')
data = node.get('y', None)
y = self.convertFromStr(data, 'float')
obj = DBLocation(id=id,
x=x,
y=y)
obj.is_dirty = False
return obj
def toXML(self, location, node=None):
if node is None:
node = ElementTree.Element('location')
node.set('id',self.convertToStr(location.db_id, 'long'))
node.set('x',self.convertToStr(location.db_x, 'float'))
node.set('y',self.convertToStr(location.db_y, 'float'))
return node
class DBWorkflowExecXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'workflowExec':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('user', None)
user = self.convertFromStr(data, 'str')
data = node.get('ip', None)
ip = self.convertFromStr(data, 'str')
data = node.get('vtVersion', None)
vt_version = self.convertFromStr(data, 'str')
data = node.get('tsStart', None)
ts_start = self.convertFromStr(data, 'datetime')
data = node.get('tsEnd', None)
ts_end = self.convertFromStr(data, 'datetime')
data = node.get('parentId', None)
parent_id = self.convertFromStr(data, 'long')
data = node.get('parentType', None)
parent_type = self.convertFromStr(data, 'str')
data = node.get('parentVersion', None)
parent_version = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
module_execs = []
for child in node.getchildren():
if child.tag == 'moduleExec':
_data = self.getDao('module_exec').fromXML(child)
module_execs.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBWorkflowExec(id=id,
user=user,
ip=ip,
vt_version=vt_version,
ts_start=ts_start,
ts_end=ts_end,
parent_id=parent_id,
parent_type=parent_type,
parent_version=parent_version,
name=name,
module_execs=module_execs)
obj.is_dirty = False
return obj
def toXML(self, workflow_exec, node=None):
if node is None:
node = ElementTree.Element('workflowExec')
node.set('id',self.convertToStr(workflow_exec.db_id, 'long'))
node.set('user',self.convertToStr(workflow_exec.db_user, 'str'))
node.set('ip',self.convertToStr(workflow_exec.db_ip, 'str'))
node.set('vtVersion',self.convertToStr(workflow_exec.db_vt_version, 'str'))
node.set('tsStart',self.convertToStr(workflow_exec.db_ts_start, 'datetime'))
node.set('tsEnd',self.convertToStr(workflow_exec.db_ts_end, 'datetime'))
node.set('parentId',self.convertToStr(workflow_exec.db_parent_id, 'long'))
node.set('parentType',self.convertToStr(workflow_exec.db_parent_type, 'str'))
node.set('parentVersion',self.convertToStr(workflow_exec.db_parent_version, 'long'))
node.set('name',self.convertToStr(workflow_exec.db_name, 'str'))
module_execs = workflow_exec.db_module_execs
for module_exec in module_execs:
            childNode = ElementTree.SubElement(node, 'moduleExec')  # tag must match the 'moduleExec' element expected by fromXML above
self.getDao('module_exec').toXML(module_exec, childNode)
return node
class DBFunctionXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'function':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('pos', None)
pos = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
parameters = []
for child in node.getchildren():
if child.tag == 'parameter':
_data = self.getDao('parameter').fromXML(child)
parameters.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBFunction(id=id,
pos=pos,
name=name,
parameters=parameters)
obj.is_dirty = False
return obj
def toXML(self, function, node=None):
if node is None:
node = ElementTree.Element('function')
node.set('id',self.convertToStr(function.db_id, 'long'))
node.set('pos',self.convertToStr(function.db_pos, 'long'))
node.set('name',self.convertToStr(function.db_name, 'str'))
parameters = function.db_parameters
for parameter in parameters:
childNode = ElementTree.SubElement(node, 'parameter')
self.getDao('parameter').toXML(parameter, childNode)
return node
class DBAbstractionXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'abstraction':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
actions = []
tags = []
for child in node.getchildren():
if child.tag == 'action':
_data = self.getDao('action').fromXML(child)
actions.append(_data)
elif child.tag == 'tag':
_data = self.getDao('tag').fromXML(child)
tags.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBAbstraction(id=id,
name=name,
actions=actions,
tags=tags)
obj.is_dirty = False
return obj
def toXML(self, abstraction, node=None):
if node is None:
node = ElementTree.Element('abstraction')
node.set('id',self.convertToStr(abstraction.db_id, 'long'))
node.set('name',self.convertToStr(abstraction.db_name, 'str'))
actions = abstraction.db_actions
for action in actions:
childNode = ElementTree.SubElement(node, 'action')
self.getDao('action').toXML(action, childNode)
tags = abstraction.db_tags
for tag in tags:
childNode = ElementTree.SubElement(node, 'tag')
self.getDao('tag').toXML(tag, childNode)
return node
class DBWorkflowXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'workflow':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
connections = []
annotations = []
others = []
modules = []
for child in node.getchildren():
if child.tag == 'connection':
_data = self.getDao('connection').fromXML(child)
connections.append(_data)
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.tag == 'other':
_data = self.getDao('other').fromXML(child)
others.append(_data)
elif child.tag == 'module':
_data = self.getDao('module').fromXML(child)
modules.append(_data)
elif child.tag == 'abstractionRef':
_data = self.getDao('abstractionRef').fromXML(child)
modules.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBWorkflow(modules=modules,
id=id,
name=name,
connections=connections,
annotations=annotations,
others=others)
obj.is_dirty = False
return obj
def toXML(self, workflow, node=None):
if node is None:
node = ElementTree.Element('workflow')
node.set('id',self.convertToStr(workflow.db_id, 'long'))
node.set('name',self.convertToStr(workflow.db_name, 'str'))
connections = workflow.db_connections
for connection in connections:
childNode = ElementTree.SubElement(node, 'connection')
self.getDao('connection').toXML(connection, childNode)
annotations = workflow.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
others = workflow.db_others
for other in others:
childNode = ElementTree.SubElement(node, 'other')
self.getDao('other').toXML(other, childNode)
modules = workflow.db_modules
for module in modules:
if module.vtType == 'module':
childNode = ElementTree.SubElement(node, 'module')
self.getDao('module').toXML(module, childNode)
elif module.vtType == 'abstractionRef':
childNode = ElementTree.SubElement(node, 'abstractionRef')
self.getDao('abstractionRef').toXML(module, childNode)
return node
class DBAbstractionRefXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'abstractionRef':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('cache', None)
cache = self.convertFromStr(data, 'int')
data = node.get('abstractionId', None)
abstraction_id = self.convertFromStr(data, 'long')
data = node.get('version', None)
version = self.convertFromStr(data, 'long')
location = None
functions = []
annotations = []
for child in node.getchildren():
if child.tag == 'location':
_data = self.getDao('location').fromXML(child)
location = _data
elif child.tag == 'function':
_data = self.getDao('function').fromXML(child)
functions.append(_data)
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBAbstractionRef(id=id,
name=name,
cache=cache,
abstraction_id=abstraction_id,
version=version,
location=location,
functions=functions,
annotations=annotations)
obj.is_dirty = False
return obj
def toXML(self, abstractionRef, node=None):
if node is None:
node = ElementTree.Element('abstractionRef')
node.set('id',self.convertToStr(abstractionRef.db_id, 'long'))
node.set('name',self.convertToStr(abstractionRef.db_name, 'str'))
node.set('cache',self.convertToStr(abstractionRef.db_cache, 'int'))
node.set('abstractionId',self.convertToStr(abstractionRef.db_abstraction_id, 'long'))
node.set('version',self.convertToStr(abstractionRef.db_version, 'long'))
location = abstractionRef.db_location
if location is not None:
childNode = ElementTree.SubElement(node, 'location')
self.getDao('location').toXML(location, childNode)
functions = abstractionRef.db_functions
for function in functions:
childNode = ElementTree.SubElement(node, 'function')
self.getDao('function').toXML(function, childNode)
annotations = abstractionRef.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
return node
class DBAnnotationXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'annotation':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('key', None)
key = self.convertFromStr(data, 'str')
data = node.get('value', None)
value = self.convertFromStr(data, 'str')
obj = DBAnnotation(id=id,
key=key,
value=value)
obj.is_dirty = False
return obj
def toXML(self, annotation, node=None):
if node is None:
node = ElementTree.Element('annotation')
node.set('id',self.convertToStr(annotation.db_id, 'long'))
node.set('key',self.convertToStr(annotation.db_key, 'str'))
node.set('value',self.convertToStr(annotation.db_value, 'str'))
return node
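# A minimal round-trip sketch for these generated DAOs (illustrative only;
# XMLDAOListBase is defined further below, and DBAnnotation comes from the
# domain classes this module is generated against):
#
#   dao = XMLDAOListBase()['annotation']
#   node = dao.toXML(DBAnnotation(id=1, key='note', value='hello'))
#   copy = dao.fromXML(node)
#   assert copy.db_key == 'note' and copy.db_value == 'hello'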
class DBChangeXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'change':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('what', None)
what = self.convertFromStr(data, 'str')
data = node.get('oldObjId', None)
oldObjId = self.convertFromStr(data, 'long')
data = node.get('newObjId', None)
newObjId = self.convertFromStr(data, 'long')
data = node.get('parentObjId', None)
parentObjId = self.convertFromStr(data, 'long')
data = node.get('parentObjType', None)
parentObjType = self.convertFromStr(data, 'str')
data = None
for child in node.getchildren():
if child.tag == 'module':
_data = self.getDao('module').fromXML(child)
data = _data
elif child.tag == 'location':
_data = self.getDao('location').fromXML(child)
data = _data
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
data = _data
elif child.tag == 'function':
_data = self.getDao('function').fromXML(child)
data = _data
elif child.tag == 'connection':
_data = self.getDao('connection').fromXML(child)
data = _data
elif child.tag == 'port':
_data = self.getDao('port').fromXML(child)
data = _data
elif child.tag == 'parameter':
_data = self.getDao('parameter').fromXML(child)
data = _data
elif child.tag == 'portSpec':
_data = self.getDao('portSpec').fromXML(child)
data = _data
elif child.tag == 'abstractionRef':
_data = self.getDao('abstractionRef').fromXML(child)
data = _data
elif child.tag == 'other':
_data = self.getDao('other').fromXML(child)
data = _data
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBChange(data=data,
id=id,
what=what,
oldObjId=oldObjId,
newObjId=newObjId,
parentObjId=parentObjId,
parentObjType=parentObjType)
obj.is_dirty = False
return obj
def toXML(self, change, node=None):
if node is None:
node = ElementTree.Element('change')
node.set('id',self.convertToStr(change.db_id, 'long'))
node.set('what',self.convertToStr(change.db_what, 'str'))
node.set('oldObjId',self.convertToStr(change.db_oldObjId, 'long'))
node.set('newObjId',self.convertToStr(change.db_newObjId, 'long'))
node.set('parentObjId',self.convertToStr(change.db_parentObjId, 'long'))
node.set('parentObjType',self.convertToStr(change.db_parentObjType, 'str'))
data = change.db_data
if data is not None:
if data.vtType == 'module':
childNode = ElementTree.SubElement(node, 'module')
self.getDao('module').toXML(data, childNode)
elif data.vtType == 'location':
childNode = ElementTree.SubElement(node, 'location')
self.getDao('location').toXML(data, childNode)
elif data.vtType == 'annotation':
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(data, childNode)
elif data.vtType == 'function':
childNode = ElementTree.SubElement(node, 'function')
self.getDao('function').toXML(data, childNode)
elif data.vtType == 'connection':
childNode = ElementTree.SubElement(node, 'connection')
self.getDao('connection').toXML(data, childNode)
elif data.vtType == 'port':
childNode = ElementTree.SubElement(node, 'port')
self.getDao('port').toXML(data, childNode)
elif data.vtType == 'parameter':
childNode = ElementTree.SubElement(node, 'parameter')
self.getDao('parameter').toXML(data, childNode)
elif data.vtType == 'portSpec':
childNode = ElementTree.SubElement(node, 'portSpec')
self.getDao('portSpec').toXML(data, childNode)
elif data.vtType == 'abstractionRef':
childNode = ElementTree.SubElement(node, 'abstractionRef')
self.getDao('abstractionRef').toXML(data, childNode)
elif data.vtType == 'other':
childNode = ElementTree.SubElement(node, 'other')
self.getDao('other').toXML(data, childNode)
return node
class DBParameterXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'parameter':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('pos', None)
pos = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('type', None)
type = self.convertFromStr(data, 'str')
data = node.get('val', None)
val = self.convertFromStr(data, 'str')
data = node.get('alias', None)
alias = self.convertFromStr(data, 'str')
obj = DBParameter(id=id,
pos=pos,
name=name,
type=type,
val=val,
alias=alias)
obj.is_dirty = False
return obj
def toXML(self, parameter, node=None):
if node is None:
node = ElementTree.Element('parameter')
node.set('id',self.convertToStr(parameter.db_id, 'long'))
node.set('pos',self.convertToStr(parameter.db_pos, 'long'))
node.set('name',self.convertToStr(parameter.db_name, 'str'))
node.set('type',self.convertToStr(parameter.db_type, 'str'))
node.set('val',self.convertToStr(parameter.db_val, 'str'))
node.set('alias',self.convertToStr(parameter.db_alias, 'str'))
return node
class DBConnectionXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'connection':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
ports = []
for child in node.getchildren():
if child.tag == 'port':
_data = self.getDao('port').fromXML(child)
ports.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBConnection(id=id,
ports=ports)
obj.is_dirty = False
return obj
def toXML(self, connection, node=None):
if node is None:
node = ElementTree.Element('connection')
node.set('id',self.convertToStr(connection.db_id, 'long'))
ports = connection.db_ports
for port in ports:
childNode = ElementTree.SubElement(node, 'port')
self.getDao('port').toXML(port, childNode)
return node
class DBActionXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'action':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('prevId', None)
prevId = self.convertFromStr(data, 'long')
data = node.get('date', None)
date = self.convertFromStr(data, 'datetime')
data = node.get('session', None)
session = self.convertFromStr(data, 'str')
data = node.get('user', None)
user = self.convertFromStr(data, 'str')
data = node.get('prune', None)
prune = self.convertFromStr(data, 'int')
annotations = []
operations = []
for child in node.getchildren():
if child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.tag == 'add':
_data = self.getDao('add').fromXML(child)
operations.append(_data)
elif child.tag == 'delete':
_data = self.getDao('delete').fromXML(child)
operations.append(_data)
elif child.tag == 'change':
_data = self.getDao('change').fromXML(child)
operations.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBAction(operations=operations,
id=id,
prevId=prevId,
date=date,
session=session,
user=user,
prune=prune,
annotations=annotations)
obj.is_dirty = False
return obj
def toXML(self, action, node=None):
if node is None:
node = ElementTree.Element('action')
node.set('id',self.convertToStr(action.db_id, 'long'))
node.set('prevId',self.convertToStr(action.db_prevId, 'long'))
node.set('date',self.convertToStr(action.db_date, 'datetime'))
node.set('session',self.convertToStr(action.db_session, 'str'))
node.set('user',self.convertToStr(action.db_user, 'str'))
node.set('prune',self.convertToStr(action.db_prune, 'int'))
annotations = action.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
operations = action.db_operations
for operation in operations:
if operation.vtType == 'add':
childNode = ElementTree.SubElement(node, 'add')
self.getDao('add').toXML(operation, childNode)
elif operation.vtType == 'delete':
childNode = ElementTree.SubElement(node, 'delete')
self.getDao('delete').toXML(operation, childNode)
elif operation.vtType == 'change':
childNode = ElementTree.SubElement(node, 'change')
self.getDao('change').toXML(operation, childNode)
return node
class DBDeleteXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'delete':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('what', None)
what = self.convertFromStr(data, 'str')
data = node.get('objectId', None)
objectId = self.convertFromStr(data, 'long')
data = node.get('parentObjId', None)
parentObjId = self.convertFromStr(data, 'long')
data = node.get('parentObjType', None)
parentObjType = self.convertFromStr(data, 'str')
obj = DBDelete(id=id,
what=what,
objectId=objectId,
parentObjId=parentObjId,
parentObjType=parentObjType)
obj.is_dirty = False
return obj
def toXML(self, delete, node=None):
if node is None:
node = ElementTree.Element('delete')
node.set('id',self.convertToStr(delete.db_id, 'long'))
node.set('what',self.convertToStr(delete.db_what, 'str'))
node.set('objectId',self.convertToStr(delete.db_objectId, 'long'))
node.set('parentObjId',self.convertToStr(delete.db_parentObjId, 'long'))
node.set('parentObjType',self.convertToStr(delete.db_parentObjType, 'str'))
return node
class DBVistrailXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'vistrail':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('version', None)
version = self.convertFromStr(data, 'str')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('dbHost', None)
dbHost = self.convertFromStr(data, 'str')
data = node.get('dbPort', None)
dbPort = self.convertFromStr(data, 'int')
data = node.get('dbName', None)
dbName = self.convertFromStr(data, 'str')
actions = []
tags = []
abstractions = []
for child in node.getchildren():
if child.tag == 'action':
_data = self.getDao('action').fromXML(child)
actions.append(_data)
elif child.tag == 'tag':
_data = self.getDao('tag').fromXML(child)
tags.append(_data)
elif child.tag == 'abstraction':
_data = self.getDao('abstraction').fromXML(child)
abstractions.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBVistrail(id=id,
version=version,
name=name,
dbHost=dbHost,
dbPort=dbPort,
dbName=dbName,
actions=actions,
tags=tags,
abstractions=abstractions)
obj.is_dirty = False
return obj
def toXML(self, vistrail, node=None):
if node is None:
node = ElementTree.Element('vistrail')
node.set('id',self.convertToStr(vistrail.db_id, 'long'))
node.set('version',self.convertToStr(vistrail.db_version, 'str'))
node.set('name',self.convertToStr(vistrail.db_name, 'str'))
node.set('dbHost',self.convertToStr(vistrail.db_dbHost, 'str'))
node.set('dbPort',self.convertToStr(vistrail.db_dbPort, 'int'))
node.set('dbName',self.convertToStr(vistrail.db_dbName, 'str'))
actions = vistrail.db_actions
for action in actions:
childNode = ElementTree.SubElement(node, 'action')
self.getDao('action').toXML(action, childNode)
tags = vistrail.db_tags
for tag in tags:
childNode = ElementTree.SubElement(node, 'tag')
self.getDao('tag').toXML(tag, childNode)
abstractions = vistrail.db_abstractions
for abstraction in abstractions:
childNode = ElementTree.SubElement(node, 'abstraction')
self.getDao('abstraction').toXML(abstraction, childNode)
return node
class DBModuleExecXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'moduleExec':
return None
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('tsStart', None)
ts_start = self.convertFromStr(data, 'datetime')
data = node.get('tsEnd', None)
ts_end = self.convertFromStr(data, 'datetime')
data = node.get('moduleId', None)
module_id = self.convertFromStr(data, 'long')
data = node.get('moduleName', None)
module_name = self.convertFromStr(data, 'str')
annotations = []
for child in node.getchildren():
if child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBModuleExec(id=id,
ts_start=ts_start,
ts_end=ts_end,
module_id=module_id,
module_name=module_name,
annotations=annotations)
obj.is_dirty = False
return obj
def toXML(self, module_exec, node=None):
if node is None:
node = ElementTree.Element('moduleExec')
node.set('id',self.convertToStr(module_exec.db_id, 'long'))
node.set('tsStart',self.convertToStr(module_exec.db_ts_start, 'datetime'))
node.set('tsEnd',self.convertToStr(module_exec.db_ts_end, 'datetime'))
node.set('moduleId',self.convertToStr(module_exec.db_module_id, 'long'))
node.set('moduleName',self.convertToStr(module_exec.db_module_name, 'str'))
annotations = module_exec.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
return node
"""generated automatically by auto_dao.py"""
class XMLDAOListBase(dict):
def __init__(self, daos=None):
if daos is not None:
dict.update(self, daos)
if 'portSpec' not in self:
self['portSpec'] = DBPortSpecXMLDAOBase(self)
if 'module' not in self:
self['module'] = DBModuleXMLDAOBase(self)
if 'tag' not in self:
self['tag'] = DBTagXMLDAOBase(self)
if 'port' not in self:
self['port'] = DBPortXMLDAOBase(self)
if 'log' not in self:
self['log'] = DBLogXMLDAOBase(self)
if 'machine' not in self:
self['machine'] = DBMachineXMLDAOBase(self)
if 'add' not in self:
self['add'] = DBAddXMLDAOBase(self)
if 'other' not in self:
self['other'] = DBOtherXMLDAOBase(self)
if 'location' not in self:
self['location'] = DBLocationXMLDAOBase(self)
if 'workflow_exec' not in self:
self['workflow_exec'] = DBWorkflowExecXMLDAOBase(self)
if 'function' not in self:
self['function'] = DBFunctionXMLDAOBase(self)
if 'abstraction' not in self:
self['abstraction'] = DBAbstractionXMLDAOBase(self)
if 'workflow' not in self:
self['workflow'] = DBWorkflowXMLDAOBase(self)
if 'abstractionRef' not in self:
self['abstractionRef'] = DBAbstractionRefXMLDAOBase(self)
if 'annotation' not in self:
self['annotation'] = DBAnnotationXMLDAOBase(self)
if 'change' not in self:
self['change'] = DBChangeXMLDAOBase(self)
if 'parameter' not in self:
self['parameter'] = DBParameterXMLDAOBase(self)
if 'connection' not in self:
self['connection'] = DBConnectionXMLDAOBase(self)
if 'action' not in self:
self['action'] = DBActionXMLDAOBase(self)
if 'delete' not in self:
self['delete'] = DBDeleteXMLDAOBase(self)
if 'vistrail' not in self:
self['vistrail'] = DBVistrailXMLDAOBase(self)
if 'module_exec' not in self:
self['module_exec'] = DBModuleExecXMLDAOBase(self)
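# Hedged usage sketch (the vistrail object and the ElementTree calls are
# illustrative): every DAO receives this registry, so nested types resolve
# through it when a whole vistrail is serialized and parsed back:
#
#   daos = XMLDAOListBase()
#   node = daos['vistrail'].toXML(my_vistrail)
#   text = ElementTree.tostring(node)
#   same = daos['vistrail'].fromXML(ElementTree.fromstring(text))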
| false | true |
1c4a26ec5b0edc44879d37cfc3b85a407c83e08a | 9,618 | py | Python | src/python/pants/reporting/plaintext_reporter.py | qma/pants | 604f58a366b66bc5cfa83e7250cb8af8130832cf | [
"Apache-2.0"
] | null | null | null | src/python/pants/reporting/plaintext_reporter.py | qma/pants | 604f58a366b66bc5cfa83e7250cb8af8130832cf | [
"Apache-2.0"
] | null | null | null | src/python/pants/reporting/plaintext_reporter.py | qma/pants | 604f58a366b66bc5cfa83e7250cb8af8130832cf | [
"Apache-2.0"
] | 1 | 2019-06-10T17:24:34.000Z | 2019-06-10T17:24:34.000Z | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from collections import namedtuple
import six
from colors import cyan, green, red, yellow
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.reporting.plaintext_reporter_base import PlainTextReporterBase
from pants.reporting.report import Report
from pants.reporting.reporter import Reporter
from pants.util.memo import memoized_method
class ToolOutputFormat(object):
"""Configuration item for displaying Tool Output to the console."""
SUPPRESS = 'SUPPRESS' # Do not display output from the workunit unless its outcome != SUCCESS
INDENT = 'INDENT' # Indent the output to line up with the indentation of the other log output
UNINDENTED = 'UNINDENTED' # Display the output raw, with no leading indentation
@classmethod
@memoized_method
def keys(cls):
return [key for key in dir(cls) if not key.startswith('_') and key.isupper()]
class LabelFormat(object):
"""Configuration item for displaying a workunit label to the console."""
SUPPRESS = 'SUPPRESS' # Don't show the label at all
DOT = 'DOT' # Just output a single '.' with no newline
FULL = 'FULL' # Show the timestamp and label
CHILD_SUPPRESS = 'CHILD_SUPPRESS' # Suppress labels for all children of this node
CHILD_DOT = 'CHILD_DOT' # Display a dot for all children of this node
@classmethod
@memoized_method
def keys(cls):
return [key for key in dir(cls) if not key.startswith('_') and key.isupper()]
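# For reference, keys() on these enum-style holders returns the public
# upper-case attribute names; dir() sorts alphabetically, so e.g. (a sketch):
#
#   ToolOutputFormat.keys() == ['INDENT', 'SUPPRESS', 'UNINDENTED']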
class PlainTextReporter(PlainTextReporterBase):
"""Plain-text reporting to stdout.
We only report progress for things under the default work root. It gets too
confusing to try and show progress for background work too.
"""
# Console reporting settings.
# outfile: Write to this file-like object.
# color: use ANSI colors in output.
# indent: Whether to indent the reporting to reflect the nesting of workunits.
# timing: Show timing report at the end of the run.
# cache_stats: Show artifact cache report at the end of the run.
Settings = namedtuple('Settings',
Reporter.Settings._fields + ('outfile', 'color', 'indent', 'timing',
'cache_stats', 'label_format',
'tool_output_format'))
_COLOR_BY_LEVEL = {
Report.FATAL: red,
Report.ERROR: red,
Report.WARN: yellow,
Report.INFO: green,
Report.DEBUG: cyan
}
# Format the std output from these workunit types as specified. If no format is specified, the
# default is ToolOutputFormat.SUPPRESS
TOOL_OUTPUT_FORMATTING = {
WorkUnitLabel.MULTITOOL: ToolOutputFormat.SUPPRESS,
WorkUnitLabel.BOOTSTRAP: ToolOutputFormat.SUPPRESS,
WorkUnitLabel.COMPILER : ToolOutputFormat.INDENT,
WorkUnitLabel.TEST : ToolOutputFormat.INDENT,
WorkUnitLabel.REPL : ToolOutputFormat.UNINDENTED,
WorkUnitLabel.RUN : ToolOutputFormat.UNINDENTED
}
# Format the labels from these workunit types as specified. If no format is specified, the
# default is LabelFormat.FULL
LABEL_FORMATTING = {
WorkUnitLabel.MULTITOOL: LabelFormat.CHILD_DOT,
WorkUnitLabel.BOOTSTRAP: LabelFormat.CHILD_SUPPRESS,
}
def __init__(self, run_tracker, settings):
super(PlainTextReporter, self).__init__(run_tracker, settings)
for key, value in settings.label_format.items():
if key not in WorkUnitLabel.keys():
self.emit('*** Got invalid key {} for --reporting-console-label-format. Expected one of {}\n'
.format(key, WorkUnitLabel.keys()))
if value not in LabelFormat.keys():
self.emit('*** Got invalid value {} for --reporting-console-label-format. Expected one of {}\n'
.format(value, LabelFormat.keys()))
for key, value in settings.tool_output_format.items():
if key not in WorkUnitLabel.keys():
self.emit('*** Got invalid key {} for --reporting-console-tool-output-format. Expected one of {}\n'
.format(key, WorkUnitLabel.keys()))
if value not in ToolOutputFormat.keys():
self.emit('*** Got invalid value {} for --reporting-console-tool-output-format. Expected one of {}\n'
.format(value, ToolOutputFormat.keys()))
# Mix in the new settings with the defaults.
self.LABEL_FORMATTING.update(settings.label_format.items())
self.TOOL_OUTPUT_FORMATTING.update(settings.tool_output_format.items())
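  # Hedged example of the dict shapes validated above: keys must come from
  # WorkUnitLabel.keys() and values from LabelFormat.keys() /
  # ToolOutputFormat.keys(), e.g. (illustrative values only):
  #
  #   settings.label_format == {'COMPILER': 'DOT'}
  #   settings.tool_output_format == {'TEST': 'UNINDENTED'}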
def open(self):
"""Implementation of Reporter callback."""
pass
def close(self):
"""Implementation of Reporter callback."""
self.emit(self.generate_epilog(self.settings))
def start_workunit(self, workunit):
"""Implementation of Reporter callback."""
if not self.is_under_main_root(workunit):
return
label_format = self._get_label_format(workunit)
if label_format == LabelFormat.FULL:
self._emit_indented_workunit_label(workunit)
# Start output on a new line.
tool_output_format = self._get_tool_output_format(workunit)
if tool_output_format == ToolOutputFormat.INDENT:
self.emit(self._prefix(workunit, b'\n'))
elif tool_output_format == ToolOutputFormat.UNINDENTED:
self.emit(b'\n')
elif label_format == LabelFormat.DOT:
self.emit(b'.')
self.flush()
def end_workunit(self, workunit):
"""Implementation of Reporter callback."""
if not self.is_under_main_root(workunit):
return
if workunit.outcome() != WorkUnit.SUCCESS and not self._show_output(workunit):
# Emit the suppressed workunit output, if any, to aid in debugging the problem.
if self._get_label_format(workunit) != LabelFormat.FULL:
self._emit_indented_workunit_label(workunit)
for name, outbuf in workunit.outputs().items():
self.emit(self._prefix(workunit, b'\n==== {} ====\n'.format(name)))
self.emit(self._prefix(workunit, outbuf.read_from(0)))
self.flush()
def do_handle_log(self, workunit, level, *msg_elements):
"""Implementation of Reporter callback."""
if not self.is_under_main_root(workunit):
return
# If the element is a (msg, detail) pair, we ignore the detail. There's no
# useful way to display it on the console.
elements = [e if isinstance(e, six.string_types) else e[0] for e in msg_elements]
msg = b'\n' + b''.join(elements)
if self.use_color_for_workunit(workunit, self.settings.color):
msg = self._COLOR_BY_LEVEL.get(level, lambda x: x)(msg)
self.emit(self._prefix(workunit, msg))
self.flush()
def handle_output(self, workunit, label, s):
"""Implementation of Reporter callback."""
if not self.is_under_main_root(workunit):
return
tool_output_format = self._get_tool_output_format(workunit)
if tool_output_format == ToolOutputFormat.INDENT:
self.emit(self._prefix(workunit, s))
elif tool_output_format == ToolOutputFormat.UNINDENTED:
self.emit(s)
self.flush()
def emit(self, s):
self.settings.outfile.write(s)
def flush(self):
self.settings.outfile.flush()
def _get_label_format(self, workunit):
for label, label_format in self.LABEL_FORMATTING.items():
if workunit.has_label(label):
return label_format
# Recursively look for a setting to suppress child label formatting.
if workunit.parent:
label_format = self._get_label_format(workunit.parent)
if label_format == LabelFormat.CHILD_DOT:
return LabelFormat.DOT
if label_format == LabelFormat.CHILD_SUPPRESS:
return LabelFormat.SUPPRESS
return LabelFormat.FULL
def _get_tool_output_format(self, workunit):
for label, tool_output_format in self.TOOL_OUTPUT_FORMATTING.items():
if workunit.has_label(label):
return tool_output_format
return ToolOutputFormat.SUPPRESS
def _emit_indented_workunit_label(self, workunit):
self.emit(b'\n{} {} {}[{}]'.format(
workunit.start_time_string,
workunit.start_delta_string,
self._indent(workunit),
workunit.name if self.settings.indent else workunit.path()))
# Emit output from some tools and not others.
# This is an arbitrary choice, but one that turns out to be useful to users in practice.
def _show_output(self, workunit):
tool_output_format = self._get_tool_output_format(workunit)
return not tool_output_format == ToolOutputFormat.SUPPRESS
def _format_aggregated_timings(self, aggregated_timings):
return b'\n'.join([b'{timing:.3f} {label}'.format(**x) for x in aggregated_timings.get_all()])
def _format_artifact_cache_stats(self, artifact_cache_stats):
stats = artifact_cache_stats.get_all()
return b'No artifact cache reads.' if not stats else \
b'\n'.join([b'{cache_name} - Hits: {num_hits} Misses: {num_misses}'.format(**x)
for x in stats])
def _indent(self, workunit):
return b' ' * (len(workunit.ancestors()) - 1)
_time_string_filler = b' ' * len('HH:MM:SS mm:ss ')
def _prefix(self, workunit, s):
if self.settings.indent:
def replace(x, c):
return x.replace(c, c + PlainTextReporter._time_string_filler + self._indent(workunit))
return replace(replace(s, b'\r'), b'\n')
else:
return PlainTextReporter._time_string_filler + s
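# Worked example of _prefix() above (a sketch, not public API): with
# settings.indent enabled, every '\n' or '\r' in the tool output gets the
# timestamp-width filler plus the workunit's nesting indent appended after it,
# so continuation lines line up under the 'HH:MM:SS mm:ss ' column:
#
#   reporter._prefix(workunit, b'\nout')
#   # -> b'\n' + b' ' * len('HH:MM:SS mm:ss ') + indent + b'out'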
| 39.580247 | 109 | 0.694843 |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from collections import namedtuple
import six
from colors import cyan, green, red, yellow
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.reporting.plaintext_reporter_base import PlainTextReporterBase
from pants.reporting.report import Report
from pants.reporting.reporter import Reporter
from pants.util.memo import memoized_method
class ToolOutputFormat(object):
  SUPPRESS = 'SUPPRESS'
  INDENT = 'INDENT'
  UNINDENTED = 'UNINDENTED'
@classmethod
@memoized_method
def keys(cls):
return [key for key in dir(cls) if not key.startswith('_') and key.isupper()]
class LabelFormat(object):
  SUPPRESS = 'SUPPRESS'
  DOT = 'DOT' # Just output a single '.' with no newline
FULL = 'FULL' # Show the timestamp and label
CHILD_SUPPRESS = 'CHILD_SUPPRESS' # Suppress labels for all children of this node
CHILD_DOT = 'CHILD_DOT' # Display a dot for all children of this node
@classmethod
@memoized_method
def keys(cls):
return [key for key in dir(cls) if not key.startswith('_') and key.isupper()]
class PlainTextReporter(PlainTextReporterBase):
# Console reporting settings.
# outfile: Write to this file-like object.
# color: use ANSI colors in output.
# indent: Whether to indent the reporting to reflect the nesting of workunits.
# timing: Show timing report at the end of the run.
# cache_stats: Show artifact cache report at the end of the run.
Settings = namedtuple('Settings',
Reporter.Settings._fields + ('outfile', 'color', 'indent', 'timing',
'cache_stats', 'label_format',
'tool_output_format'))
_COLOR_BY_LEVEL = {
Report.FATAL: red,
Report.ERROR: red,
Report.WARN: yellow,
Report.INFO: green,
Report.DEBUG: cyan
}
# Format the std output from these workunit types as specified. If no format is specified, the
# default is ToolOutputFormat.SUPPRESS
TOOL_OUTPUT_FORMATTING = {
WorkUnitLabel.MULTITOOL: ToolOutputFormat.SUPPRESS,
WorkUnitLabel.BOOTSTRAP: ToolOutputFormat.SUPPRESS,
WorkUnitLabel.COMPILER : ToolOutputFormat.INDENT,
WorkUnitLabel.TEST : ToolOutputFormat.INDENT,
WorkUnitLabel.REPL : ToolOutputFormat.UNINDENTED,
WorkUnitLabel.RUN : ToolOutputFormat.UNINDENTED
}
# Format the labels from these workunit types as specified. If no format is specified, the
# default is LabelFormat.FULL
LABEL_FORMATTING = {
WorkUnitLabel.MULTITOOL: LabelFormat.CHILD_DOT,
WorkUnitLabel.BOOTSTRAP: LabelFormat.CHILD_SUPPRESS,
}
def __init__(self, run_tracker, settings):
super(PlainTextReporter, self).__init__(run_tracker, settings)
for key, value in settings.label_format.items():
if key not in WorkUnitLabel.keys():
self.emit('*** Got invalid key {} for --reporting-console-label-format. Expected one of {}\n'
.format(key, WorkUnitLabel.keys()))
if value not in LabelFormat.keys():
self.emit('*** Got invalid value {} for --reporting-console-label-format. Expected one of {}\n'
.format(value, LabelFormat.keys()))
for key, value in settings.tool_output_format.items():
if key not in WorkUnitLabel.keys():
self.emit('*** Got invalid key {} for --reporting-console-tool-output-format. Expected one of {}\n'
.format(key, WorkUnitLabel.keys()))
if value not in ToolOutputFormat.keys():
self.emit('*** Got invalid value {} for --reporting-console-tool-output-format. Expected one of {}\n'
.format(value, ToolOutputFormat.keys()))
# Mix in the new settings with the defaults.
self.LABEL_FORMATTING.update(settings.label_format.items())
self.TOOL_OUTPUT_FORMATTING.update(settings.tool_output_format.items())
def open(self):
pass
def close(self):
self.emit(self.generate_epilog(self.settings))
def start_workunit(self, workunit):
if not self.is_under_main_root(workunit):
return
label_format = self._get_label_format(workunit)
if label_format == LabelFormat.FULL:
self._emit_indented_workunit_label(workunit)
# Start output on a new line.
tool_output_format = self._get_tool_output_format(workunit)
if tool_output_format == ToolOutputFormat.INDENT:
self.emit(self._prefix(workunit, b'\n'))
elif tool_output_format == ToolOutputFormat.UNINDENTED:
self.emit(b'\n')
elif label_format == LabelFormat.DOT:
self.emit(b'.')
self.flush()
def end_workunit(self, workunit):
if not self.is_under_main_root(workunit):
return
if workunit.outcome() != WorkUnit.SUCCESS and not self._show_output(workunit):
# Emit the suppressed workunit output, if any, to aid in debugging the problem.
if self._get_label_format(workunit) != LabelFormat.FULL:
self._emit_indented_workunit_label(workunit)
for name, outbuf in workunit.outputs().items():
self.emit(self._prefix(workunit, b'\n==== {} ====\n'.format(name)))
self.emit(self._prefix(workunit, outbuf.read_from(0)))
self.flush()
def do_handle_log(self, workunit, level, *msg_elements):
if not self.is_under_main_root(workunit):
return
    # If the element is a (msg, detail) pair, we ignore the detail. There's no
    # useful way to display it on the console.
elements = [e if isinstance(e, six.string_types) else e[0] for e in msg_elements]
msg = b'\n' + b''.join(elements)
if self.use_color_for_workunit(workunit, self.settings.color):
msg = self._COLOR_BY_LEVEL.get(level, lambda x: x)(msg)
self.emit(self._prefix(workunit, msg))
self.flush()
def handle_output(self, workunit, label, s):
if not self.is_under_main_root(workunit):
return
tool_output_format = self._get_tool_output_format(workunit)
if tool_output_format == ToolOutputFormat.INDENT:
self.emit(self._prefix(workunit, s))
elif tool_output_format == ToolOutputFormat.UNINDENTED:
self.emit(s)
self.flush()
def emit(self, s):
self.settings.outfile.write(s)
def flush(self):
self.settings.outfile.flush()
def _get_label_format(self, workunit):
for label, label_format in self.LABEL_FORMATTING.items():
if workunit.has_label(label):
return label_format
if workunit.parent:
label_format = self._get_label_format(workunit.parent)
if label_format == LabelFormat.CHILD_DOT:
return LabelFormat.DOT
if label_format == LabelFormat.CHILD_SUPPRESS:
return LabelFormat.SUPPRESS
return LabelFormat.FULL
def _get_tool_output_format(self, workunit):
for label, tool_output_format in self.TOOL_OUTPUT_FORMATTING.items():
if workunit.has_label(label):
return tool_output_format
return ToolOutputFormat.SUPPRESS
def _emit_indented_workunit_label(self, workunit):
self.emit(b'\n{} {} {}[{}]'.format(
workunit.start_time_string,
workunit.start_delta_string,
self._indent(workunit),
workunit.name if self.settings.indent else workunit.path()))
def _show_output(self, workunit):
tool_output_format = self._get_tool_output_format(workunit)
return not tool_output_format == ToolOutputFormat.SUPPRESS
def _format_aggregated_timings(self, aggregated_timings):
return b'\n'.join([b'{timing:.3f} {label}'.format(**x) for x in aggregated_timings.get_all()])
def _format_artifact_cache_stats(self, artifact_cache_stats):
stats = artifact_cache_stats.get_all()
return b'No artifact cache reads.' if not stats else \
b'\n'.join([b'{cache_name} - Hits: {num_hits} Misses: {num_misses}'.format(**x)
for x in stats])
def _indent(self, workunit):
return b' ' * (len(workunit.ancestors()) - 1)
_time_string_filler = b' ' * len('HH:MM:SS mm:ss ')
def _prefix(self, workunit, s):
if self.settings.indent:
def replace(x, c):
return x.replace(c, c + PlainTextReporter._time_string_filler + self._indent(workunit))
return replace(replace(s, b'\r'), b'\n')
else:
return PlainTextReporter._time_string_filler + s
| true | true |
1c4a285a311b988d2b7f90c24434d040cd54642a | 3,886 | py | Python | setup.py | herrkaefer/psycopgr | 376e8511ac591d32533118b5006135458cb5f27f | [
"MIT"
] | 23 | 2017-12-01T03:47:23.000Z | 2022-01-06T23:36:56.000Z | setup.py | herrkaefer/psycopgr | 376e8511ac591d32533118b5006135458cb5f27f | [
"MIT"
] | 9 | 2017-11-10T00:33:50.000Z | 2021-06-06T01:27:20.000Z | setup.py | herrkaefer/psycopgr | 376e8511ac591d32533118b5006135458cb5f27f | [
"MIT"
] | 9 | 2017-11-09T13:38:10.000Z | 2022-02-17T16:08:35.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Acknowledgement:
# This file is modified from https://github.com/kennethreitz/setup.py
#
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'psycopgr'
DESCRIPTION = 'A Python wrapper of pgRouting for routing from nodes to nodes on real map.'
URL = 'https://github.com/herrkaefer/psycopgr'
EMAIL = '[email protected]'
AUTHOR = 'Yang Liu'
REQUIRES_PYTHON = '>=2.7'
VERSION = '1.0.6'
# What packages are required for this module to be executed?
REQUIRED = [
'psycopg2',
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
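# (Caveat added for clarity: FileNotFoundError exists only on Python 3; under
# the Python 2 interpreters permitted by REQUIRES_PYTHON a missing README
# would raise NameError from this except clause rather than being caught.)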
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
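# Hedged usage note: via the cmdclass mapping above, the custom release flow
# runs as an ordinary setup.py command, e.g.
#
#   $ python setup.py upload
#
# which rebuilds the sdist/wheel artifacts, uploads them with twine, then
# tags and pushes the release (see UploadCommand.run above).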
| 28.785185 | 90 | 0.643592 |
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
NAME = 'psycopgr'
DESCRIPTION = 'A Python wrapper of pgRouting for routing from nodes to nodes on real map.'
URL = 'https://github.com/herrkaefer/psycopgr'
EMAIL = '[email protected]'
AUTHOR = 'Yang Liu'
REQUIRES_PYTHON = '>=2.7'
VERSION = '1.0.6'
REQUIRED = [
'psycopg2',
]
EXTRAS = {
}
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
cmdclass={
'upload': UploadCommand,
},
)
| true | true |
1c4a2fa84a2670f568ddc2052bf3b41997f3a6af | 7,462 | py | Python | tests/components/media_player/test_async_helpers.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 4 | 2019-01-10T14:47:54.000Z | 2021-04-22T02:06:27.000Z | tests/components/media_player/test_async_helpers.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 6 | 2021-02-08T21:02:40.000Z | 2022-03-12T00:52:16.000Z | tests/components/media_player/test_async_helpers.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 3 | 2018-08-29T19:26:20.000Z | 2020-01-19T11:58:22.000Z | """The tests for the Async Media player helper functions."""
import unittest
import asyncio
import homeassistant.components.media_player as mp
from homeassistant.const import (
STATE_PLAYING, STATE_PAUSED, STATE_ON, STATE_OFF, STATE_IDLE)
from homeassistant.util.async_ import run_coroutine_threadsafe
from tests.common import get_test_home_assistant
class AsyncMediaPlayer(mp.MediaPlayerDevice):
"""Async media player test class."""
def __init__(self, hass):
"""Initialize the test media player."""
self.hass = hass
self._volume = 0
self._state = STATE_OFF
@property
def state(self):
"""State of the player."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@asyncio.coroutine
def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._volume = volume
@asyncio.coroutine
def async_media_play(self):
"""Send play command."""
self._state = STATE_PLAYING
@asyncio.coroutine
def async_media_pause(self):
"""Send pause command."""
self._state = STATE_PAUSED
@asyncio.coroutine
def async_turn_on(self):
"""Turn the media player on."""
self._state = STATE_ON
@asyncio.coroutine
def async_turn_off(self):
"""Turn the media player off."""
self._state = STATE_OFF
class SyncMediaPlayer(mp.MediaPlayerDevice):
"""Sync media player test class."""
def __init__(self, hass):
"""Initialize the test media player."""
self.hass = hass
self._volume = 0
self._state = STATE_OFF
@property
def state(self):
"""State of the player."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._volume = volume
def volume_up(self):
"""Turn volume up for media player."""
if self.volume_level < 1:
self.set_volume_level(min(1, self.volume_level + .2))
def volume_down(self):
"""Turn volume down for media player."""
if self.volume_level > 0:
self.set_volume_level(max(0, self.volume_level - .2))
def media_play_pause(self):
"""Play or pause the media player."""
if self._state == STATE_PLAYING:
self._state = STATE_PAUSED
else:
self._state = STATE_PLAYING
def toggle(self):
"""Toggle the power on the media player."""
if self._state in [STATE_OFF, STATE_IDLE]:
self._state = STATE_ON
else:
self._state = STATE_OFF
@asyncio.coroutine
def async_media_play_pause(self):
"""Create a coroutine to wrap the future returned by ABC.
This allows the run_coroutine_threadsafe helper to be used.
"""
yield from super().async_media_play_pause()
@asyncio.coroutine
def async_toggle(self):
"""Create a coroutine to wrap the future returned by ABC.
This allows the run_coroutine_threadsafe helper to be used.
"""
yield from super().async_toggle()
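# Hedged note on the two wrappers above: the MediaPlayerDevice base class may
# schedule the synchronous media_play_pause/toggle via the event loop and
# hand back a Future, but run_coroutine_threadsafe() accepts only coroutines,
# so the tests need these thin coroutine shims. The resulting call pattern:
#
#   future = run_coroutine_threadsafe(player.async_toggle(), hass.loop)
#   future.result()  # block the test thread until the loop side finishes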
class TestAsyncMediaPlayer(unittest.TestCase):
"""Test the media_player module."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.player = AsyncMediaPlayer(self.hass)
def tearDown(self):
"""Shut down test instance."""
self.hass.stop()
def test_volume_up(self):
"""Test the volume_up helper function."""
assert self.player.volume_level == 0
run_coroutine_threadsafe(
self.player.async_set_volume_level(0.5), self.hass.loop).result()
assert self.player.volume_level == 0.5
run_coroutine_threadsafe(
self.player.async_volume_up(), self.hass.loop).result()
assert self.player.volume_level == 0.6
def test_volume_down(self):
"""Test the volume_down helper function."""
assert self.player.volume_level == 0
run_coroutine_threadsafe(
self.player.async_set_volume_level(0.5), self.hass.loop).result()
assert self.player.volume_level == 0.5
run_coroutine_threadsafe(
self.player.async_volume_down(), self.hass.loop).result()
assert self.player.volume_level == 0.4
def test_media_play_pause(self):
"""Test the media_play_pause helper function."""
assert self.player.state == STATE_OFF
run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop).result()
assert self.player.state == STATE_PLAYING
run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop).result()
assert self.player.state == STATE_PAUSED
def test_toggle(self):
"""Test the toggle helper function."""
assert self.player.state == STATE_OFF
run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop).result()
assert self.player.state == STATE_ON
run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop).result()
assert self.player.state == STATE_OFF
class TestSyncMediaPlayer(unittest.TestCase):
"""Test the media_player module."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.player = SyncMediaPlayer(self.hass)
def tearDown(self):
"""Shut down test instance."""
self.hass.stop()
def test_volume_up(self):
"""Test the volume_up helper function."""
assert self.player.volume_level == 0
self.player.set_volume_level(0.5)
assert self.player.volume_level == 0.5
run_coroutine_threadsafe(
self.player.async_volume_up(), self.hass.loop).result()
assert self.player.volume_level == 0.7
def test_volume_down(self):
"""Test the volume_down helper function."""
assert self.player.volume_level == 0
self.player.set_volume_level(0.5)
assert self.player.volume_level == 0.5
run_coroutine_threadsafe(
self.player.async_volume_down(), self.hass.loop).result()
assert self.player.volume_level == 0.3
def test_media_play_pause(self):
"""Test the media_play_pause helper function."""
assert self.player.state == STATE_OFF
run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop).result()
assert self.player.state == STATE_PLAYING
run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop).result()
assert self.player.state == STATE_PAUSED
def test_toggle(self):
"""Test the toggle helper function."""
assert self.player.state == STATE_OFF
run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop).result()
assert self.player.state == STATE_ON
run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop).result()
assert self.player.state == STATE_OFF
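# Hedged reading of the volume assertions above: AsyncMediaPlayer exercises
# the default MediaPlayerDevice volume helpers (a 0.1 step, hence 0.5 -> 0.6),
# while SyncMediaPlayer overrides them with a 0.2 step (hence 0.5 -> 0.7).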
| 33.3125 | 77 | 0.641785 | import unittest
import asyncio
import homeassistant.components.media_player as mp
from homeassistant.const import (
STATE_PLAYING, STATE_PAUSED, STATE_ON, STATE_OFF, STATE_IDLE)
from homeassistant.util.async_ import run_coroutine_threadsafe
from tests.common import get_test_home_assistant
class AsyncMediaPlayer(mp.MediaPlayerDevice):
def __init__(self, hass):
self.hass = hass
self._volume = 0
self._state = STATE_OFF
@property
def state(self):
return self._state
@property
def volume_level(self):
return self._volume
@asyncio.coroutine
def async_set_volume_level(self, volume):
self._volume = volume
@asyncio.coroutine
def async_media_play(self):
self._state = STATE_PLAYING
@asyncio.coroutine
def async_media_pause(self):
self._state = STATE_PAUSED
@asyncio.coroutine
def async_turn_on(self):
self._state = STATE_ON
@asyncio.coroutine
def async_turn_off(self):
self._state = STATE_OFF
class SyncMediaPlayer(mp.MediaPlayerDevice):
def __init__(self, hass):
self.hass = hass
self._volume = 0
self._state = STATE_OFF
@property
def state(self):
return self._state
@property
def volume_level(self):
return self._volume
def set_volume_level(self, volume):
self._volume = volume
def volume_up(self):
if self.volume_level < 1:
self.set_volume_level(min(1, self.volume_level + .2))
def volume_down(self):
if self.volume_level > 0:
self.set_volume_level(max(0, self.volume_level - .2))
def media_play_pause(self):
if self._state == STATE_PLAYING:
self._state = STATE_PAUSED
else:
self._state = STATE_PLAYING
def toggle(self):
if self._state in [STATE_OFF, STATE_IDLE]:
self._state = STATE_ON
else:
self._state = STATE_OFF
@asyncio.coroutine
def async_media_play_pause(self):
yield from super().async_media_play_pause()
@asyncio.coroutine
def async_toggle(self):
yield from super().async_toggle()
class TestAsyncMediaPlayer(unittest.TestCase):
    def setUp(self):
        self.hass = get_test_home_assistant()
self.player = AsyncMediaPlayer(self.hass)
def tearDown(self):
self.hass.stop()
def test_volume_up(self):
assert self.player.volume_level == 0
run_coroutine_threadsafe(
self.player.async_set_volume_level(0.5), self.hass.loop).result()
assert self.player.volume_level == 0.5
run_coroutine_threadsafe(
self.player.async_volume_up(), self.hass.loop).result()
assert self.player.volume_level == 0.6
def test_volume_down(self):
assert self.player.volume_level == 0
run_coroutine_threadsafe(
self.player.async_set_volume_level(0.5), self.hass.loop).result()
assert self.player.volume_level == 0.5
run_coroutine_threadsafe(
self.player.async_volume_down(), self.hass.loop).result()
assert self.player.volume_level == 0.4
def test_media_play_pause(self):
assert self.player.state == STATE_OFF
run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop).result()
assert self.player.state == STATE_PLAYING
run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop).result()
assert self.player.state == STATE_PAUSED
def test_toggle(self):
assert self.player.state == STATE_OFF
run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop).result()
assert self.player.state == STATE_ON
run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop).result()
assert self.player.state == STATE_OFF
class TestSyncMediaPlayer(unittest.TestCase):
    def setUp(self):
        self.hass = get_test_home_assistant()
self.player = SyncMediaPlayer(self.hass)
def tearDown(self):
self.hass.stop()
def test_volume_up(self):
assert self.player.volume_level == 0
self.player.set_volume_level(0.5)
assert self.player.volume_level == 0.5
run_coroutine_threadsafe(
self.player.async_volume_up(), self.hass.loop).result()
assert self.player.volume_level == 0.7
def test_volume_down(self):
assert self.player.volume_level == 0
self.player.set_volume_level(0.5)
assert self.player.volume_level == 0.5
run_coroutine_threadsafe(
self.player.async_volume_down(), self.hass.loop).result()
assert self.player.volume_level == 0.3
def test_media_play_pause(self):
assert self.player.state == STATE_OFF
run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop).result()
assert self.player.state == STATE_PLAYING
run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop).result()
assert self.player.state == STATE_PAUSED
def test_toggle(self):
assert self.player.state == STATE_OFF
run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop).result()
assert self.player.state == STATE_ON
run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop).result()
assert self.player.state == STATE_OFF
| true | true |
1c4a2fb0bc1d5a5a20808b69120dab5af1370e7c | 1,850 | py | Python | main.py | qianqianjun/DCGAN | 4e2d37f1d785e592e59334b91d197ef0475c1c99 | [
"MIT"
] | null | null | null | main.py | qianqianjun/DCGAN | 4e2d37f1d785e592e59334b91d197ef0475c1c99 | [
"MIT"
] | null | null | null | main.py | qianqianjun/DCGAN | 4e2d37f1d785e592e59334b91d197ef0475c1c99 | [
"MIT"
] | null | null | null | """
Written by qianqianjun
2019.12.20
Entry-point script for training the GAN.
"""
import os
import tensorflow as tf
from train_argparse import hps
from dataset_loader import train_images
from data_provider import MnistData
from DCGAN import DCGAN
from utils import combine_imgs
# Create the directory for generated results
output_dir='./out'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
# Create the DCGAN model
dcgan=DCGAN(hps)
# Load the MNIST dataset
mnist_data=MnistData(train_images,hps.z_dim,hps.img_size)
# Build the computation graph
z_placeholder,img_placeholder,generated_imgs,losses=dcgan.build()
# Build the training op
train_op=dcgan.build_train_op(losses,hps.learning_rate,hps.beta1)
# Start training:
init_op=tf.global_variables_initializer()
# Number of training steps
train_steps=hps.train_steps
with tf.Session() as sess:
sess.run(init_op)
for step in range(train_steps):
batch_imgs,batch_z=mnist_data.next_batch(hps.batch_size)
fetches=[train_op,losses['g'],losses['d']]
should_sample=(step+1) %100 ==0
        # When it is time to save intermediate results, also fetch the generated images
if should_sample:
fetches+= [generated_imgs]
output_values=sess.run(
fetches,feed_dict={
z_placeholder:batch_z,
img_placeholder:batch_imgs,
}
)
_,g_loss_val,d_loss_val=output_values[0:3]
        # Print the losses during training
if (step+1) %200==0:
print('step: %4d , g_loss: %4.3f , d_loss: %4.3f' % (step, g_loss_val, d_loss_val))
        # Save intermediate image results:
if should_sample:
gen_imgs_val=output_values[3]
gen_img_path=os.path.join(output_dir,'%05d-gen.jpg' % (step+1))
gt_img_path=os.path.join(output_dir,'%05d-gt.jpg' % (step+1))
gen_img=combine_imgs(gen_imgs_val,hps.img_size)
gt_img=combine_imgs(batch_imgs,hps.img_size)
gen_img.save(gen_img_path)
gt_img.save(gt_img_path) | 30.327869 | 95 | 0.675676 | import os
import tensorflow as tf
from train_argparse import hps
from dataset_loader import train_images
from data_provider import MnistData
from DCGAN import DCGAN
from utils import combine_imgs
output_dir='./out'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
dcgan=DCGAN(hps)
mnist_data=MnistData(train_images,hps.z_dim,hps.img_size)
z_placeholder,img_placeholder,generated_imgs,losses=dcgan.build()
train_op=dcgan.build_train_op(losses,hps.learning_rate,hps.beta1)
init_op=tf.global_variables_initializer()
train_steps=hps.train_steps
with tf.Session() as sess:
sess.run(init_op)
for step in range(train_steps):
batch_imgs,batch_z=mnist_data.next_batch(hps.batch_size)
fetches=[train_op,losses['g'],losses['d']]
should_sample=(step+1) %100 ==0
if should_sample:
fetches+= [generated_imgs]
output_values=sess.run(
fetches,feed_dict={
z_placeholder:batch_z,
img_placeholder:batch_imgs,
}
)
_,g_loss_val,d_loss_val=output_values[0:3]
if (step+1) %200==0:
print('step: %4d , g_loss: %4.3f , d_loss: %4.3f' % (step, g_loss_val, d_loss_val))
if should_sample:
gen_imgs_val=output_values[3]
gen_img_path=os.path.join(output_dir,'%05d-gen.jpg' % (step+1))
gt_img_path=os.path.join(output_dir,'%05d-gt.jpg' % (step+1))
gen_img=combine_imgs(gen_imgs_val,hps.img_size)
gt_img=combine_imgs(batch_imgs,hps.img_size)
gen_img.save(gen_img_path)
gt_img.save(gt_img_path) | true | true |
1c4a30464aa70d1e27bbeb964001a464d4795d36 | 11,724 | py | Python | shellbot/listener.py | bernard357/shellbot | daf64fbab4085d1591bf9a1aecd06b4fc615d132 | [
"Apache-2.0"
] | 11 | 2017-04-30T18:10:27.000Z | 2021-11-07T16:59:29.000Z | shellbot/listener.py | DataCraft-AI/shellbot | daf64fbab4085d1591bf9a1aecd06b4fc615d132 | [
"Apache-2.0"
] | 38 | 2017-04-20T17:33:05.000Z | 2017-11-10T20:19:07.000Z | shellbot/listener.py | DataCraft-AI/shellbot | daf64fbab4085d1591bf9a1aecd06b4fc615d132 | [
"Apache-2.0"
] | 3 | 2017-04-21T21:14:53.000Z | 2021-07-27T22:01:21.000Z | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from multiprocessing import Process
import random
from six import string_types
import time
import yaml
from .events import Event, Message, Join, Leave
class Listener(Process):
"""
    Handles messages received from the chat space
"""
DEFER_DURATION = 2.0 # let SSL stabilize before pumping from the queue
EMPTY_DELAY = 0.005 # time to wait if queue is empty
FRESH_DURATION = 0.5 # maximum amount of time for listener detection
def __init__(self, engine=None, filter=None):
"""
Handles events received from chat spaces
:param engine: the overarching engine
:type engine: Engine
:param filter: if provided, used to filter every event
:type filter: callable
If a ``filter`` is provided, then it is called for each event received.
An event may be a Message, a Join or Leave notification,
or any other Event.
Example::
def filter(event):
# duplicate input stream
my_queue.put(str(event))
# change input stream
event.text = event.text.title()
return event
listener = Listener(filter=filter)
"""
Process.__init__(self)
self.engine = engine
self.filter = filter
def run(self):
"""
Continuously receives updates
This function is looping on items received from the queue, and
is handling them one by one in the background.
Processing should be handled in a separate background process, like
in the following example::
listener = Listener(engine=my_engine)
process = listener.start()
The recommended way for stopping the process is to change the
parameter ``general.switch`` in the context. For example::
engine.set('general.switch', 'off')
Alternatively, the loop is also broken when a poison pill is pushed
to the queue. For example::
engine.ears.put(None)
"""
logging.info(u"Starting listener")
time.sleep(self.DEFER_DURATION) # let SSL stabilize first
try:
self.engine.set('listener.counter', 0)
while self.engine.get('general.switch', 'on') == 'on':
if self.engine.get('listener.lock', 'off') == 'on':
time.sleep(0.001)
continue
if self.engine.ears.empty():
self.idle()
time.sleep(self.EMPTY_DELAY)
continue
try:
item = self.engine.ears.get_nowait()
if item is None:
break
self.process(item)
except Exception as feedback:
logging.exception(feedback)
except KeyboardInterrupt:
pass
logging.info(u"Listener has been stopped")
def idle(self):
"""
        Finds useful housekeeping to do: loads pending bots and
        periodically picks a random channel as a vacuum candidate
"""
if self.engine.bots_to_load:
id = self.engine.bots_to_load.pop()
self.engine.ears.put({'type': 'load_bot', 'id': id})
elif not self.engine.get('vacuum.stamp'):
self.engine.set('vacuum.stamp', time.time())
elif time.time() - self.engine.get('vacuum.stamp') > 5.0:
self.engine.set('vacuum.stamp', time.time())
if self.engine.bots:
id = random.choice(list(self.engine.bots.keys()))
# if id:
# logging.debug(u"- to vacuum: {}".format(id))
# self.engine.vacuum.put()
def process(self, item):
"""
Processes items received from the chat space
:param item: the item received
:type item: dict or json-encoded string
        This function dispatches items based on their type, as read from
        the ``type`` key of the provided dict.
Following types are handled:
* ``message`` -- This is a textual message, maybe with a file attached.
The message is given to the ``on_message()`` function.
* ``join`` -- This is when a person or the bot joins a space.
The function ``on_join()`` is called, providing details on the
person or the bot who joined
* ``leave`` -- This is when a person or the bot leaves a space.
The function ``on_leave()`` is called with details on the
leaving person or bot.
* ``load_bot`` -- This is a special event to load the cache in the
process that is running the listener. The identifier of the channel
to load is provided as well.
* on any other case, the function ``on_inbound()`` is
called.
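        A sketch of low-level items follows; the ``load_bot`` shape mirrors
        the one built in ``idle()``, while the extra keys of the ``message``
        item are assumptions based on what ``on_message()`` reads.
        Example::
            engine.ears.put({'type': 'load_bot', 'id': '*channel'})
            engine.ears.put({'type': 'message',
                             'text': 'hello world',
                             'from_id': '*alice',
                             'channel_id': '*channel'})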
"""
counter = self.engine.context.increment('listener.counter')
logging.debug(u'Listener is working on {}'.format(counter))
if isinstance(item, string_types):
item = yaml.safe_load(item) # better unicode than json.loads()
assert isinstance(item, dict) # low-level event representation
if item['type'] == 'message':
logging.debug(u"- processing a 'message' event")
event = Message(item)
if self.filter:
event = self.filter(event)
self.on_message(event)
elif item['type'] == 'join':
logging.debug(u"- processing a 'join' event")
event = Join(item)
if self.filter:
event = self.filter(event)
self.on_join(event)
elif item['type'] == 'leave':
logging.debug(u"- processing a 'leave' event")
event = Leave(item)
if self.filter:
event = self.filter(event)
self.on_leave(event)
elif item['type'] == 'load_bot':
logging.debug(u"- processing a 'load_bot' event")
bot = self.engine.get_bot(channel_id=item['id'])
else:
logging.debug(u"- processing an inbound event")
event = Event(item)
if self.filter:
event = self.filter(event)
self.on_inbound(event)
def on_message(self, received):
"""
A message has been received
:param received: the message received
:type received: Message
Received information is transmitted to registered callbacks on the
``message`` event at the engine level.
When a message is directed to the bot it is submitted directly to the
        shell. This is handled as a command that can be executed immediately,
or pushed to the inbox and processed by the worker when possible.
        All other input is thrown away, except if there are some
        downwards listeners. In that situation the input is pushed to a
        queue so that another process can pick it up and handle it.
The protocol for downwards listeners works like this:
* Check the ``bot.fan`` queue frequently
        * On each check, update the string ``fan.<channel_id>`` in the context
          with the value of ``time.time()``. This signals that you are around.
        The value of ``fan.<channel_id>`` is checked on every message that is
        not for the bot itself. If the stamp is fresh enough, the input is put
        to the ``bot.fan`` queue; otherwise the message is thrown away.
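        A minimal sketch of a downstream consumer follows; the loop
        condition, the timeout value and the ``channel_id`` variable are
        assumptions here, only ``bot.fan`` and the ``fan.<channel_id>``
        stamp come from the protocol above.
        Example::
            while engine.get('general.switch', 'on') == 'on':
                engine.set('fan.' + channel_id, time.time())
                try:
                    print(bot.fan.get(block=True, timeout=0.1))
                except Exception:
                    pass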
"""
assert received.type == 'message' # sanity check
self.engine.dispatch('message', received=received)
bot = self.engine.get_bot(received.channel_id)
if received.from_id == self.engine.get('bot.id'):
logging.debug(u"- sent by me, thrown away")
return
input = received.text
if input is None:
logging.debug(u"- no input in this item, thrown away")
return
if len(input) > 0 and input[0] in ['@', '/', '!']:
input = input[1:]
label = 'fan.' + received.channel_id
logging.debug(u"- sensing fan listener on '{}'".format(label))
elapsed = time.time() - self.engine.get(label, 0)
if elapsed < self.FRESH_DURATION:
logging.debug(u"- putting input to fan queue")
bot.fan.put(input) # forward downstream
return
name = self.engine.get('bot.name', 'shelly')
if input.startswith(name):
logging.debug(u"- bot name in command")
input = input[len(name):].strip()
elif received.is_direct:
logging.debug(u"- direct message")
elif self.engine.get('bot.id') in received.mentioned_ids:
logging.debug(u"- bot mentioned in command")
else:
logging.info(u"- not for me, thrown away")
return
logging.debug(u"- submitting command to the shell")
self.engine.shell.do(input, received=received)
def on_join(self, received):
"""
A person, or the bot, has joined a space
:param received: the event received
:type received: Join
Received information is transmitted to registered callbacks on the
        ``join`` event at the engine level.
In the special case where the bot itself is joining a channel by
invitation, then the event ``enter`` is dispatched instead.
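        A sketch of a low-level join item follows; ``actor_id`` and
        ``channel_id`` are the keys read here, any other field is left to
        the space implementation.
        Example::
            engine.ears.put({'type': 'join',
                             'actor_id': '*bob',
                             'channel_id': '*channel'})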
"""
assert received.type == 'join'
if received.actor_id == self.engine.get('bot.id'):
self.engine.dispatch('enter', received=received)
bot = self.engine.get_bot(received.channel_id)
self.engine.on_enter(received)
else:
bot = self.engine.get_bot(received.channel_id)
self.engine.dispatch('join', received=received)
def on_leave(self, received):
"""
A person, or the bot, has left a space
:param received: the event received
:type received: Leave
Received information is transmitted to registered callbacks on the
        ``leave`` event at the engine level.
In the special case where the bot itself has been kicked off
from a channel, then the event ``exit`` is dispatched instead.
"""
assert received.type == 'leave'
if received.actor_id == self.engine.get('bot.id'):
self.engine.dispatch('exit', received=received)
self.engine.on_exit(received)
else:
self.engine.dispatch('leave', received=received)
def on_inbound(self, received):
"""
Another event has been received
:param received: the event received
:type received: Event or derivative
Received information is transmitted to registered callbacks on the
``inbound`` at the engine level.
"""
assert received.type not in ('message', 'join', 'leave')
self.engine.dispatch('inbound', received=received)
| 33.212465 | 81 | 0.598601 |
import json
import logging
from multiprocessing import Process
import random
from six import string_types
import time
import yaml
from .events import Event, Message, Join, Leave
class Listener(Process):
    DEFER_DURATION = 2.0
    EMPTY_DELAY = 0.005
FRESH_DURATION = 0.5
def __init__(self, engine=None, filter=None):
Process.__init__(self)
self.engine = engine
self.filter = filter
def run(self):
logging.info(u"Starting listener")
time.sleep(self.DEFER_DURATION)
try:
self.engine.set('listener.counter', 0)
while self.engine.get('general.switch', 'on') == 'on':
if self.engine.get('listener.lock', 'off') == 'on':
time.sleep(0.001)
continue
if self.engine.ears.empty():
self.idle()
time.sleep(self.EMPTY_DELAY)
continue
try:
item = self.engine.ears.get_nowait()
if item is None:
break
self.process(item)
except Exception as feedback:
logging.exception(feedback)
except KeyboardInterrupt:
pass
logging.info(u"Listener has been stopped")
def idle(self):
if self.engine.bots_to_load:
id = self.engine.bots_to_load.pop()
self.engine.ears.put({'type': 'load_bot', 'id': id})
elif not self.engine.get('vacuum.stamp'):
self.engine.set('vacuum.stamp', time.time())
elif time.time() - self.engine.get('vacuum.stamp') > 5.0:
self.engine.set('vacuum.stamp', time.time())
if self.engine.bots:
id = random.choice(list(self.engine.bots.keys()))
def process(self, item):
counter = self.engine.context.increment('listener.counter')
logging.debug(u'Listener is working on {}'.format(counter))
if isinstance(item, string_types):
item = yaml.safe_load(item)
assert isinstance(item, dict)
if item['type'] == 'message':
logging.debug(u"- processing a 'message' event")
event = Message(item)
if self.filter:
event = self.filter(event)
self.on_message(event)
elif item['type'] == 'join':
logging.debug(u"- processing a 'join' event")
event = Join(item)
if self.filter:
event = self.filter(event)
self.on_join(event)
elif item['type'] == 'leave':
logging.debug(u"- processing a 'leave' event")
event = Leave(item)
if self.filter:
event = self.filter(event)
self.on_leave(event)
elif item['type'] == 'load_bot':
logging.debug(u"- processing a 'load_bot' event")
bot = self.engine.get_bot(channel_id=item['id'])
else:
logging.debug(u"- processing an inbound event")
event = Event(item)
if self.filter:
event = self.filter(event)
self.on_inbound(event)
def on_message(self, received):
assert received.type == 'message'
self.engine.dispatch('message', received=received)
bot = self.engine.get_bot(received.channel_id)
if received.from_id == self.engine.get('bot.id'):
logging.debug(u"- sent by me, thrown away")
return
input = received.text
if input is None:
logging.debug(u"- no input in this item, thrown away")
return
if len(input) > 0 and input[0] in ['@', '/', '!']:
input = input[1:]
label = 'fan.' + received.channel_id
logging.debug(u"- sensing fan listener on '{}'".format(label))
elapsed = time.time() - self.engine.get(label, 0)
if elapsed < self.FRESH_DURATION:
logging.debug(u"- putting input to fan queue")
                bot.fan.put(input)
                return
name = self.engine.get('bot.name', 'shelly')
if input.startswith(name):
logging.debug(u"- bot name in command")
input = input[len(name):].strip()
elif received.is_direct:
logging.debug(u"- direct message")
elif self.engine.get('bot.id') in received.mentioned_ids:
logging.debug(u"- bot mentioned in command")
else:
logging.info(u"- not for me, thrown away")
return
logging.debug(u"- submitting command to the shell")
self.engine.shell.do(input, received=received)
def on_join(self, received):
assert received.type == 'join'
if received.actor_id == self.engine.get('bot.id'):
self.engine.dispatch('enter', received=received)
bot = self.engine.get_bot(received.channel_id)
self.engine.on_enter(received)
else:
bot = self.engine.get_bot(received.channel_id)
self.engine.dispatch('join', received=received)
def on_leave(self, received):
assert received.type == 'leave'
if received.actor_id == self.engine.get('bot.id'):
self.engine.dispatch('exit', received=received)
self.engine.on_exit(received)
else:
self.engine.dispatch('leave', received=received)
def on_inbound(self, received):
assert received.type not in ('message', 'join', 'leave')
self.engine.dispatch('inbound', received=received)
| true | true |
1c4a30519feee2e48d05593d11019103cfebb776 | 469 | py | Python | src/Edge.py | ZacharyJohnson1/python-graph-theory | 885096f9066e22b477d0c58d9cec5db2a62571c1 | [
"MIT"
] | null | null | null | src/Edge.py | ZacharyJohnson1/python-graph-theory | 885096f9066e22b477d0c58d9cec5db2a62571c1 | [
"MIT"
] | null | null | null | src/Edge.py | ZacharyJohnson1/python-graph-theory | 885096f9066e22b477d0c58d9cec5db2a62571c1 | [
"MIT"
] | null | null | null | class Edge:
def __init__(self, u, v, w):
self.u = u
self.v = v
self.w = w
def __eq__(self, edge):
if self.u.id == edge.u.id and self.v.id == edge.v.id:
return True
return False
    def __ge__(self, v):
        # greater-or-equal comparison by edge weight
        return self.w >= v.w
    def __lt__(self, v):
        # strict less-than comparison by edge weight
        return self.w < v.w
@staticmethod
def weight(edge):
return edge.w
| 15.129032 | 61 | 0.503198 | class Edge:
def __init__(self, u, v, w):
self.u = u
self.v = v
self.w = w
def __eq__(self, edge):
if self.u.id == edge.u.id and self.v.id == edge.v.id:
return True
return False
    def __ge__(self, v):
        return self.w >= v.w
    def __lt__(self, v):
        return self.w < v.w
@staticmethod
def weight(edge):
return edge.w
| true | true |
1c4a3097d9a65dce695cc13d39c358f32e52ebdc | 81 | py | Python | abc/161/A.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | 2 | 2022-01-22T07:56:58.000Z | 2022-01-24T00:29:37.000Z | abc/161/A.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | abc/161/A.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | X, Y, Z = map(int, input().split())
X, Y = Y, X
X, Z = Z, X
print(f'{X} {Y} {Z}') | 20.25 | 35 | 0.444444 | X, Y, Z = map(int, input().split())
X, Y = Y, X
X, Z = Z, X
print(f'{X} {Y} {Z}') | true | true |
1c4a30bb90b9f5879da24bf6517fa49b6226eb8b | 73,574 | py | Python | user_agent_parser/constants.py | Purushot14/user-agent-parser | 5b6a3cb0d024ce1a2822a617e5a18b45806f8d1b | [
"MIT"
] | null | null | null | user_agent_parser/constants.py | Purushot14/user-agent-parser | 5b6a3cb0d024ce1a2822a617e5a18b45806f8d1b | [
"MIT"
] | null | null | null | user_agent_parser/constants.py | Purushot14/user-agent-parser | 5b6a3cb0d024ce1a2822a617e5a18b45806f8d1b | [
"MIT"
] | null | null | null | """
Created by prakash at 02/03/22
"""
__author__ = 'Prakash14'
class OS:
WINDOWS = "Windows"
WINDOWS_PHONE = "Windows Phone"
ANDROID = "Android"
MAC_OS = "Mac Os"
LINUX = "Linux"
IOS = "iOS"
CHROME_OS = "Chrome OS"
class DEVICE_TYPE:
COMPUTER = "Computer"
MOBILE = "Mobile"
SERVER = "Server"
BOT = "Bot"
class DEVICE_NAME:
IPHONE = "iPhone"
IPAD = "iPad"
MAC = "Mac"
CHROME_BOOK = "Chrome Book"
ANDROID = "Android Phone"
MOBILE_DEVICE_CODE_NAME = {
# OnePlus Devices
"AC2003": "OnePlus Nord 5G",
"EB2101": "OnePlus Nord CE 5G",
"EB2103": "OnePlus Nord CE 5G",
"DN2101": "OnePlus Nord 5G",
"DN2103": "OnePlus Nord 5G",
"AC2001": "OnePlus Nord",
"GM1901": "OnePlus 7",
"A6000": "OnePlus 6",
"A6010": "OnePlus 6T",
"A6003": "OnePlus 6",
"A5010": "OnePlus 5T",
"A5000": "OnePlus 5",
"LE2101": "OnePlus 9R",
"LE2100": "OnePlus 9R",
"LE2113": "OnePlus 9",
"LE2111": "OnePlus 9",
"LE2110": "OnePlus 9",
"LE2117": "OnePlus 9",
"LE2121": "OnePlus 9 Pro",
"LE2125": "OnePlus 9 Pro",
"LE2123": "OnePlus 9 Pro",
"LE2120": "OnePlus 9 Pro",
"LE2127": "OnePlus 9 Pro",
"GM1911": "OnePlus 7 Pro",
"GM1913": "OnePlus 7 Pro",
"GM1917": "OnePlus 7 Pro",
"GM1910": "OnePlus 7 Pro",
"GM1915": "OnePlus 7 Pro",
"HD1901": "OnePlus 7T",
"HD1903": "OnePlus 7T",
"HD1900": "OnePlus 7T",
"HD1907": "OnePlus 7T",
"HD1905": "OnePlus 7T",
"HD1911": "OnePlus 7T",
"KB2001": "OnePlus 8T",
"KB2000": "OnePlus 8T",
"KB2003": "OnePlus 8T",
"KB2005": "OnePlus 8T",
"IN2013": "OnePlus 8",
"IN2017": "OnePlus 8",
"IN2019": "OnePlus 8",
"IN2010": "OnePlus 8",
"IN2011": "OnePlus 8",
"IN2021": "OnePlus 8 Pro",
"IN2023": "OnePlus 8 Pro",
"IN2020": "OnePlus 8 Pro",
"IN2025": "OnePlus 8 Pro",
# Samsung Devices
"SM-X900": "Samsung Galaxy Tab S8 Ultra",
"SM-X906": "Samsung Galaxy Tab S8 Ultra",
"SM-X800": "Samsung Galaxy Tab S8+",
"SM-X806": "Samsung Galaxy Tab S8+",
"SM-X700": "Samsung Galaxy Tab S8",
"SM-X706": "Samsung Galaxy Tab S8",
"SM-S908": "Samsung Galaxy S22 Ultra",
"SM-S906": "Samsung Galaxy S22+",
"SM-S901": "Samsung Galaxy S22",
"SM-G990": "Samsung Galaxy S21 FE",
"SM-A136": "Samsung Galaxy A13 5G",
"SM-X200": "Samsung Galaxy Tab A8 10.5",
"SM-X205": "Samsung Galaxy Tab A8 10.5",
"SM-A032": "Samsung Galaxy A03 Core",
"SM-E426": "Samsung Galaxy F42 5G",
"SM-M526": "Samsung Galaxy M52 5G",
"SM-M225": "Samsung Galaxy M22",
"SM-M326": "Samsung Galaxy M32 5G",
"SM-A037": "Samsung Galaxy A03s",
"SM-A528": "Samsung Galaxy A52s 5G",
"SM-F926B": "Samsung Galaxy Z Fold3 5G",
"SM-F711B": "Samsung Galaxy Z Flip3 5G",
"SM-E225": "Samsung Galaxy F22",
"SM-M325": "Samsung Galaxy M32",
"SM-A226": "Samsung Galaxy A22 5G",
"SM-A225": "Samsung Galaxy A22",
"SM-T730": "Samsung Galaxy Tab S7 FE",
"SM-T736B": "Samsung Galaxy Tab S7 FE",
"SM-T220": "Samsung Galaxy Tab A7 Lite",
"SM-T225": "Samsung Galaxy Tab A7 Lite",
"SM-E526": "Samsung Galaxy F52 5G",
"SM-M426": "Samsung Galaxy M42 5G",
"SM-E025": "Samsung Galaxy F02s",
"SM-F127": "Samsung Galaxy F12",
"SM-A725": "Samsung Galaxy A72",
"SM-A526": "Samsung Galaxy A52 5G",
"SM-A525": "Samsung Galaxy A52",
"SM-A325": "Samsung Galaxy A32",
"SM-M625": "Samsung Galaxy M62",
"SM-E625": "Samsung Galaxy F62",
"SM-M127": "Samsung Galaxy M12",
"SM-M022": "Samsung Galaxy M02",
"SM-A022": "Samsung Galaxy A02",
"SM-G991": "Samsung Galaxy S21",
"SM-G996": "Samsung Galaxy S21+",
"SM-G998": "Samsung Galaxy S21 Ultra",
"SM-A326": "Samsung Galaxy A32 5G",
"SM-M025": "Samsung Galaxy M02s",
"SM-A025": "Samsung Galaxy A02s",
"SM-A125": "Samsung Galaxy A12",
"SM-M217": "Samsung Galaxy M21s",
"SM-A426": "Samsung Galaxy A42 5G",
"SM-F415": "Samsung Galaxy F41",
"SM-G780": "Samsung Galaxy S20 FE",
"SM-G781": "Samsung Galaxy S20 FE",
"SM-F916B": "Samsung Galaxy Z Fold2 5G",
"SM-M515": "Samsung Galaxy M51",
"SM-N980": "Samsung Galaxy Note 20",
"SM-N981": "Samsung Galaxy Note 20",
"SM-N985": "Samsung Galaxy Note 20 Ultra",
"SM-N986": "Samsung Galaxy Note 20 Ultra",
"SM-F707": "Samsung Galaxy Z Flip 5G",
"SM-T870": "Samsung Galaxy Tab S7",
"SM-T875": "Samsung Galaxy Tab S7",
"SM-T876B": "Samsung Galaxy Tab S7",
"SM-T970": "Samsung Galaxy Tab S7+",
"SM-T976B": "Samsung Galaxy Tab S7+",
"SM-M317": "Samsung Galaxy M31s",
"SM-A013": "Samsung Galaxy A01 Core",
"SM-M017": "Samsung Galaxy M01s",
"SM-M015": "Samsung Galaxy M01",
"SM-A217": "Samsung Galaxy A21s",
"SM-A716F": "Samsung Galaxy A71 5G",
"SM-A516F": "Samsung Galaxy A51 5G",
"SM-A215": "Samsung Galaxy A21",
"SM-P610N": "Samsung Galaxy Tab S6 Lite",
"SM-P615": "Samsung Galaxy Tab S6 Lite",
"SM-G980": "Samsung Galaxy S20",
"SM-G981": "Samsung Galaxy S20",
"SM-G985": "Samsung Galaxy S20+",
"SM-G986": "Samsung Galaxy S20+",
"SM-G988": "Samsung Galaxy S20 Ultra",
"SM-M115": "Samsung Galaxy M11",
"SM-M115F": "Samsung Galaxy M11",
"SM-A315": "Samsung Galaxy A31",
"SM-A415": "Samsung Galaxy A41",
"SM-M215": "Samsung Galaxy M21",
"SM-A115": "Samsung Galaxy A11",
"SM-M315": "Samsung Galaxy M31",
"SM-F700": "Samsung Galaxy Z Flip",
"SM-T866N": "Samsung Galaxy Tab S6 5G",
"SM-G715F": "Samsung Galaxy Xcover Pro",
"SM-N770F": "Samsung Galaxy Note 10 Lite",
"SM-G770F": "Samsung Galaxy S10 Lite",
"SM-A015": "Samsung Galaxy A01",
"SM-A715": "Samsung Galaxy A71",
"SM-A515": "Samsung Galaxy A51",
"SM-M307": "Samsung Galaxy M30s",
"SM-A207": "Samsung Galaxy A20s",
"SM-M107": "Samsung Galaxy M10s",
"SM-A707": "Samsung Galaxy A70s",
"SM-A507": "Samsung Galaxy A50s",
"SM-A307": "Samsung Galaxy A30s",
"SM-A908": "Samsung Galaxy A90 5G",
"SM-F900": "Samsung Galaxy Z Fold",
"SM-F907": "Samsung Galaxy Z Fold",
"SM-A107": "Samsung Galaxy A10s",
"SM-A102": "Samsung Galaxy A10e",
"SM-N970": "Samsung Galaxy Note 10",
"SM-N971": "Samsung Galaxy Note 10",
"SM-N975F": "Samsung Galaxy Note 10+",
"SM-N976": "Samsung Galaxy Note 10+",
"SM-M405": "Samsung Galaxy M40",
"SM-G977": "Samsung Galaxy S10 5G",
"SM-T920": "Samsung Galaxy View 2",
"SM-T927": "Samsung Galaxy View 2",
"SM-T927A": "Samsung Galaxy View 2",
"SM-A606": "Samsung Galaxy A60",
"SM-A805": "Samsung Galaxy A80",
"SM-A705": "Samsung Galaxy A70",
"SM-A405": "Samsung Galaxy A40",
"SM-A205": "Samsung Galaxy A20",
"SM-A202": "Samsung Galaxy A20e",
"SM-A260": "Samsung Galaxy A2 Core",
"SM-G975": "Samsung Galaxy S10+",
"SM-G973": "Samsung Galaxy S10",
"SM-G970": "Samsung Galaxy S10e",
"SM-A505": "Samsung Galaxy A50",
"SM-A305": "Samsung Galaxy A30",
"SM-A105": "Samsung Galaxy A10",
"SM-T720": "Samsung Galaxy Tab S5e",
"SM-T725": "Samsung Galaxy Tab S5e",
"SM-T510": "Samsung Galaxy Tab A 10.1 (2019)",
"SM-T515": "Samsung Galaxy Tab A 10.1 (2019)",
"SM-M305": "Samsung Galaxy M30",
"SM-M105": "Samsung Galaxy M10",
"SM-M205": "Samsung Galaxy M20",
"SM-G887": "Samsung Galaxy A8s",
"SM-G6200": "Samsung Galaxy A6s",
"SM-A920": "Samsung Galaxy A9 (2018)",
"SM-A750": "Samsung Galaxy A7 (2018)",
"SM-J415": "Samsung Galaxy J4+",
"SM-J610": "Samsung Galaxy J6+",
"SM-N960": "Samsung Galaxy Note 9",
"SM-T590": "Samsung Galaxy Tab A 10.5 (2018)",
"; SM-T595": "Samsung Galaxy Tab A 10.5 (2018)",
"SM-T830": "Samsung Galaxy Tab S4",
"; SM-T835": "Samsung Galaxy Tab S4",
"SM-J800": "Samsung Galaxy J8 (2018)",
"SM-J600G": "Samsung Galaxy On6",
"SM-G8850": "Samsung Galaxy A8 Star[16]",
"SM-J737": "Samsung Galaxy J7 (2018)",
"SM-A600": "Samsung Galaxy A6 (2018)",
"SM-A605": "Samsung Galaxy A6+ (2018)",
"SM-J400": "Samsung Galaxy J4 (2018)",
"SM-J600": "Samsung Galaxy J6 (2018)",
"SM-J720": "Samsung Galaxy J3 Duo",
"SM-G611": "Samsung Galaxy J4 Prime 2 Samsung Galaxy J7 (2018)",
"SM-G960": "Samsung Galaxy S9",
"SM-G965": "Samsung Galaxy S9+",
"SM-J250": "Samsung Galaxy J4 Pro (2018)",
"SM-A530": "Samsung Galaxy A5 (2018)[17]",
"SM-A730": "Samsung Galaxy A5+ (2018)",
"SM-J200G": "Samsung Galaxy J2",
"SM-T380": "Samsung Galaxy Tab A 8.0 (2017)",
"SM-T385": "Samsung Galaxy Tab A 8.0 (2017)",
"SM-C710": "Samsung Galaxy C8 / C7 (2017)Samsung Galaxy J7+",
"SM-C8000": "Samsung Galaxy C8 / C7 (2017)Samsung Galaxy J7+",
"SM-N950": "Samsung Galaxy Note 8",
"SM-G892": "Samsung Galaxy S8 Active",
"SM-N935": "Samsung Galaxy Note Fan Edition (FE)",
"SM-J727": "Samsung Galaxy J3 (2017)",
"SM-J730x": "Samsung Galaxy J3 (2017)",
"SM-J530": "Samsung Galaxy J5 (2017)",
"SM-J530Y": "Samsung Galaxy J5 (2017)",
"SM-J327": "Samsung Galaxy J7 (2017)",
"SM-J330x": "Samsung Galaxy J7 (2017)",
"SM-J730": "Samsung Galaxy J7 Pro (2017)",
"SM-G615": "Samsung Galaxy J7 Max",
"SM-G390": "Samsung Galaxy Xcover 4",
"SM-G950": "Samsung Galaxy S8",
"SM-G955": "Samsung Galaxy S8+",
"SM-C5010": "Samsung Galaxy C5 Pro",
"SM-T820": "Samsung Galaxy Tab S3",
"SM-T825": "Samsung Galaxy Tab S3",
"SM-A720": "Samsung Galaxy A7 (2017)",
"SM-A520": "Samsung Galaxy A5 (2017)",
"SM-A320": "Samsung Galaxy A3 (2017)",
"SM-C7010": "Samsung Galaxy C5 Pro",
"SM-J106F": "Samsung Galaxy J1 mini Prime/Galaxy V2 (Indonesia)",
"SM-G532F": "Samsung Galaxy J2 Prime",
"SM-G532M": "Samsung Galaxy J2 Prime",
"SM-G532G": "Samsung Galaxy J2 Prime",
"SM-C900F": "Samsung Galaxy C9 Pro",
"SM-A810": "Samsung Galaxy A8 (2016)",
"SM-G570": "Samsung Galaxy On5 (2016)",
"SM-G610": "Samsung Galaxy On5 (2016)",
"SM-J710": "Samsung Galaxy On7 (2016)",
"SM-G610F": "Samsung Galaxy J7 Prime",
"SM-G610M": "Samsung Galaxy J7 Prime",
"SM-N930": "Samsung Galaxy Note 7",
"SM-G570F": "Samsung Galaxy J2 Prime",
"SM-G570M": "Samsung Galaxy J2 Prime",
"SM-G891A": "Samsung Galaxy S7 Active",
"SM-J310F": "Samsung Galaxy J3 Pro",
"SM-T585": "Samsung Galaxy Tab A 10.1 (2016)",
"SM-C5000": "Samsung Galaxy C5",
"SM-C7000": "Samsung Galaxy C7",
"SM-J5109": "Samsung Galaxy J5 (2016)",
"SM-J510F": "Samsung Galaxy J5 (2016)",
"SM-J510FN": "Samsung Galaxy J5 (2016)",
"SM-J510H": "Samsung Galaxy J5 (2016)",
"SM-J510G": "Samsung Galaxy J5 (2016)",
"SM-J510MN": "Samsung Galaxy J5 (2016)",
"SM-J510Y": "Samsung Galaxy J5 (2016)",
"SM-J5108": "Samsung Galaxy J5 (2016)",
"SM-J510K": "Samsung Galaxy J5 (2016)",
"SM-J510L": "Samsung Galaxy J5 (2016)",
"SM-J510S": "Samsung Galaxy J5 (2016)",
"SM-J510UN": "Samsung Galaxy J5 (2016)",
"SM-J7109": "Samsung Galaxy J7 (2016)",
"SM-J710F": "Samsung Galaxy J7 (2016)",
"SM-J710FN": "Samsung Galaxy J7 (2016)",
"SM-J710H": "Samsung Galaxy J7 (2016)",
"SM-J710MN": "Samsung Galaxy J7 (2016)",
"SM-J710FQ": "Samsung Galaxy J7 (2016)",
"SM-J710K": "Samsung Galaxy J7 (2016)",
"SM-J710GN": "Samsung Galaxy J7 (2016)",
"SM-J3109x": "Samsung Galaxy J3 (2016)",
"SM-J320F": "Samsung Galaxy J3 (2016)",
"SM-J320G": "Samsung Galaxy J3 (2016)",
"SM-J320P": "Samsung Galaxy J3 (2016)",
"SM-J320M": "Samsung Galaxy J3 (2016)",
"SM-T280": "Samsung Galaxy Tab A6",
"SM-T285": "Samsung Galaxy Tab A6",
"SM-A9100": "Samsung Galaxy A9 Pro (2016)",
"SM-A910F": "Samsung Galaxy A9 Pro (2016)",
"SM-J105B": "Samsung Galaxy J1 Mini",
"SM-J105DS": "Samsung Galaxy J1 Mini",
"SM-J105F": "Samsung Galaxy J1 Mini",
"SM-G935F": "Samsung Galaxy S7 Edge",
"SM-G935FD": "Samsung Galaxy S7 Edge",
"SM-G9350": "Samsung Galaxy S7 Edge",
"SM-G935A": "Samsung Galaxy S7 Edge",
"SM-G935V": "Samsung Galaxy S7 Edge",
"SM-G935U": "Samsung Galaxy S7 Edge",
"SM-G935S": "Samsung Galaxy S7 Edge",
"SM-G935K": "Samsung Galaxy S7 Edge",
"SM-G935W8": "Samsung Galaxy S7 Edge",
"SC-02H": "Samsung Galaxy S7 Edge",
"SM-G930F": "Samsung Galaxy S7",
"SM-G930FD": "Samsung Galaxy S7",
"SM-G9300": "Samsung Galaxy S7",
"SM-G930A": "Samsung Galaxy S7",
"SM-G930V": "Samsung Galaxy S7",
"SM-G930AZ": "Samsung Galaxy S7",
"SM-G930S": "Samsung Galaxy S7",
"SM-G930K": "Samsung Galaxy S7",
"SM-G930W8": "Samsung Galaxy S7",
"SM-J120F": "Samsung Galaxy J1 (2016)",
"SM-J120M": "Samsung Galaxy J1 (2016)",
"SM-A9000": "Samsung Galaxy A9 (2016)",
"SM-A7100": "Samsung Galaxy A7 (2016)",
"SM-A710F": "Samsung Galaxy A7 (2016)",
"SM-A710FD": "Samsung Galaxy A7 (2016)",
"SM-A710M": "Samsung Galaxy A7 (2016)",
"SM-A710Y": "Samsung Galaxy A7 (2016)",
"SM-A5100": "Samsung Galaxy A5 (2016)",
"SM-A510F": "Samsung Galaxy A5 (2016)",
"SM-A510FD": "Samsung Galaxy A5 (2016)",
"SM-A510M": "Samsung Galaxy A5 (2016)",
"SM-A510Y": "Samsung Galaxy A5 (2016)",
"SM-A310F": "Samsung Galaxy A3 (2016)",
"SM-A310M": "Samsung Galaxy A3 (2016)",
"SM-T670": "Samsung Galaxy View",
"SC-01H": "Samsung Galaxy Active Neo",
"SM-J200F": "Samsung Galaxy J2",
"SM-J200Y": "Samsung Galaxy J2",
"SM-J200H": "Samsung Galaxy J2",
"SM-J200M": "Samsung Galaxy J2",
"SM-G928A": "Samsung Galaxy S6 Edge+",
"SM-G928AZ": "Samsung Galaxy S6 Edge+",
"SM-G928D": "Samsung Galaxy S6 Edge+",
"SM-G928F": "Samsung Galaxy S6 Edge+",
"SM-G928FD": "Samsung Galaxy S6 Edge+",
"SM-G928I": "Samsung Galaxy S6 Edge+",
"SM-G928K": "Samsung Galaxy S6 Edge+",
"SM-G928L": "Samsung Galaxy S6 Edge+",
"SM-G928P": "Samsung Galaxy S6 Edge+",
"SM-G928PZ": "Samsung Galaxy S6 Edge+",
"SM-G928R4": "Samsung Galaxy S6 Edge+",
"SM-G928R7": "Samsung Galaxy S6 Edge+",
"SM-G928S": "Samsung Galaxy S6 Edge+",
"SM-G928T": "Samsung Galaxy S6 Edge+",
"SM-G928T1": "Samsung Galaxy S6 Edge+",
"SM-G928TR": "Samsung Galaxy S6 Edge+",
"SM-G928V": "Samsung Galaxy S6 Edge+",
"SM-G9280": "Samsung Galaxy S6 Edge+",
"SM-G9288": "Samsung Galaxy S6 Edge+",
"SM-G9289": "Samsung Galaxy S6 Edge+",
"SM-A8000": "Samsung Galaxy A8",
"SM-A800F": "Samsung Galaxy A8",
"SM-A800I": "Samsung Galaxy A8",
"SM-A800S": "Samsung Galaxy A8",
"SM-A800Y": "Samsung Galaxy A8",
"SM-N9200": "Samsung Galaxy Note 5",
"SM-N920C": "Samsung Galaxy Note 5",
"SM-N920T": "Samsung Galaxy Note 5",
"SM-N920A": "Samsung Galaxy Note 5",
"SM-N920I": "Samsung Galaxy Note 5",
"SM-N9208": "Samsung Galaxy Note 5",
"SM-G903F": "Samsung Galaxy S5 Neo",
"SM-G903W": "Samsung Galaxy S5 Neo",
"SM-G318H": "Samsung Galaxy Trend 2 Lite",
"SM-G890A": "Samsung Galaxy S6 Active",
"SM-J500F": "Samsung Galaxy J5",
"SM-J500H": "Samsung Galaxy J5",
"SM-J500M": "Samsung Galaxy J5",
"SM-J500G": "Samsung Galaxy J5",
"SM-J700F": "Samsung Galaxy J7",
"SM-J700H": "Samsung Galaxy J7",
"SM-J700M": "Samsung Galaxy J7",
"SM-J700T": "Samsung Galaxy J7",
"SM-J700P": "Samsung Galaxy J7",
"SM-G925A": "Samsung Galaxy S6 Edge",
"SM-G925AZ": "Samsung Galaxy S6 Edge",
"SM-G925F": "Samsung Galaxy S6 Edge",
"SM-G925I": "Samsung Galaxy S6 Edge",
"SM-G925K": "Samsung Galaxy S6 Edge",
"SM-G925L": "Samsung Galaxy S6 Edge",
"SM-G925P": "Samsung Galaxy S6 Edge",
"SM-G925PZ": "Samsung Galaxy S6 Edge",
"SM-G925R4": "Samsung Galaxy S6 Edge",
"SM-G925R7": "Samsung Galaxy S6 Edge",
"SM-G925S": "Samsung Galaxy S6 Edge",
"SM-G925T": "Samsung Galaxy S6 Edge",
"SM-G925T1": "Samsung Galaxy S6 Edge",
"SM-G925TR": "Samsung Galaxy S6 Edge",
"SM-G925V": "Samsung Galaxy S6 Edge",
"SM-G9250": "Samsung Galaxy S6 Edge",
"SM-G9258": "Samsung Galaxy S6 Edge",
"SM-G9259": "Samsung Galaxy S6 Edge",
"SM-G920A": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920AZ": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920D": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920F": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920FD": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920I": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920K": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920L": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920P": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920PZ": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920R4": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920R7": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920S": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920T": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920T1": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920TR": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G920V": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G9200": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G9208": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-G9209": "Samsung Galaxy S6Samsung Galaxy Gear 2",
"SM-J100H": "Samsung Galaxy J1",
"SM-J100F": "Samsung Galaxy J1",
"SM-E500H": "Samsung Galaxy E5",
"SM-E500F": "Samsung Galaxy E5",
"SM-A700F": "Samsung Galaxy A7",
"SM-A700FD": "Samsung Galaxy A7",
"SM-A700FQ": "Samsung Galaxy A7",
"SM-A700H": "Samsung Galaxy A7",
"SM-A700K": "Samsung Galaxy A7",
"SM-A700L": "Samsung Galaxy A7",
"SM-A700M": "Samsung Galaxy A7",
"SM-A700S": "Samsung Galaxy A7",
"SM-A700X": "Samsung Galaxy A7",
"SM-A700YD": "Samsung Galaxy A7",
"SM-A700YZ": "Samsung Galaxy A7",
"SM-A7000": "Samsung Galaxy A7",
"SM-A7009": "Samsung Galaxy A7",
"SM-A7009W": "Samsung Galaxy A7",
"SM-E700H": "Samsung Galaxy E7",
"SM-A500F": "Samsung Galaxy A5 (2015)",
"SM-A500F1": "Samsung Galaxy A5 (2015)",
"SM-A500FQ": "Samsung Galaxy A5 (2015)",
"SM-A500FU": "Samsung Galaxy A5 (2015)",
"SM-A500G": "Samsung Galaxy A5 (2015)",
"SM-A500H": "Samsung Galaxy A5 (2015)",
"SM-A500HQ": "Samsung Galaxy A5 (2015)",
"SM-A500K": "Samsung Galaxy A5 (2015)",
"SM-A500L": "Samsung Galaxy A5 (2015)",
"SM-A500M": "Samsung Galaxy A5 (2015)",
"SM-A500S": "Samsung Galaxy A5 (2015)",
"SM-A500X": "Samsung Galaxy A5 (2015)",
"SM-A500XZ": "Samsung Galaxy A5 (2015)",
"SM-A500Y": "Samsung Galaxy A5 (2015)",
"SM-A500YZ": "Samsung Galaxy A5 (2015)",
"SM-A5000": "Samsung Galaxy A5 (2015)",
"SM-A5009": "Samsung Galaxy A5 (2015)",
"SM-A300F": "Samsung Galaxy A3 (2015)",
"SM-A300FU": "Samsung Galaxy A3 (2015)",
"SM-A300G": "Samsung Galaxy A3 (2015)",
"SM-A300H": "Samsung Galaxy A3 (2015)",
"SM-A300HQ": "Samsung Galaxy A3 (2015)",
"SM-A300M": "Samsung Galaxy A3 (2015)",
"SM-A300X": "Samsung Galaxy A3 (2015)",
"SM-A300XU": "Samsung Galaxy A3 (2015)",
"SM-A300XZ": "Samsung Galaxy A3 (2015)",
"SM-A300Y": "Samsung Galaxy A3 (2015)",
"SM-A300YZ": "Samsung Galaxy A3 (2015)",
"SM-A3000": "Samsung Galaxy A3 (2015)",
"SM-A3009": "Samsung Galaxy A3 (2015)",
"SM-G360BT": "Samsung Galaxy Core Prime",
"SM-G360H": "Samsung Galaxy Core Prime",
"SM-N915G": "Samsung Galaxy Note Edge",
"SM-N9150": "Samsung Galaxy Note Edge",
"SM-N910G": "Samsung Galaxy Note 4",
"SM-G130H": "Samsung Galaxy Young 2",
"SM-G850F": "Samsung Galaxy Alpha",
"SM-G850FQ": "Samsung Galaxy Alpha",
"SM-G850K": "Samsung Galaxy Alpha",
"SM-G850L": "Samsung Galaxy Alpha",
"SM-G850M": "Samsung Galaxy Alpha",
"SM-G850S": "Samsung Galaxy Alpha",
"SM-G850W": "Samsung Galaxy Alpha",
"SM-G850Y": "Samsung Galaxy Alpha",
"SM-G530BT": "Samsung Galaxy Grand Prime",
"SM-G530F": "Samsung Galaxy Grand Prime",
"SM-G530FQ": "Samsung Galaxy Grand Prime",
"SM-G530FZ": "Samsung Galaxy Grand Prime",
"SM-G530H": "Samsung Galaxy Grand Prime",
"SM-G530M": "Samsung Galaxy Grand Prime",
"SM-G530MU": "Samsung Galaxy Grand Prime",
"SM-G530P": "Samsung Galaxy Grand Prime",
"SM-G530R4": "Samsung Galaxy Grand Prime",
"SM-G530R7": "Samsung Galaxy Grand Prime",
"SM-G530T": "Samsung Galaxy Grand Prime",
"SM-G530W": "Samsung Galaxy Grand Prime",
"SM-G530Y": "Samsung Galaxy Grand Prime",
"SM-G5306W": "Samsung Galaxy Grand Prime",
"SM-G5308W": "Samsung Galaxy Grand Prime",
"SM-G5309W": "Samsung Galaxy Grand Prime",
"SM-G110B": "Samsung Galaxy Pocket 2",
"SM-G750F": "Samsung Galaxy Mega 2",
"SM-G350E": "Samsung Galaxy Star 2 Plus",
"SM-G313F": "Samsung Galaxy Ace 4",
"SM-G355H": "Samsung Galaxy Core 2",
"GT-S5500": "Samsung Galaxy S5 Mini",
"GT-S5430": "Samsung Galaxy S5 Mini",
"SM-T800": "Samsung Galaxy Tab S 10.5",
"SM-T805": "Samsung Galaxy Tab S 10.5",
"SM-T807": "Samsung Galaxy Tab S 10.5",
"SM-T807P": "Samsung Galaxy Tab S 10.5",
"SM-T807V": "Samsung Galaxy Tab S 10.5",
"SM-G386F": "Samsung Galaxy Core",
"SM-C115": "Samsung Galaxy K Zoom",
"SM-G310": "Samsung Galaxy Ace Style",
"SM-G900": "Samsung Galaxy S5",
"SM-G900FD": "Samsung Galaxy S5",
"GT-I9300I": "Samsung Galaxy S3 Neo",
"GT-I9301I": "Samsung Galaxy S3 Neo",
"GT-I9303I": "Samsung Galaxy S3 Neo",
"SM-N7500": "Samsung Galaxy Note 3 Neo",
"SM-N7502": "Samsung Galaxy Note 3 Neo",
"SM-N7505": "Samsung Galaxy Note 3 Neo",
"SM-G7102": "Samsung Galaxy Grand 2 (SM-G7100)",
"GT-S7262": "Samsung Galaxy Star Pro (GT-S7260)",
"GT-S7392": "Samsung Galaxy Trend Lite (GT-S7390)",
"SM-G3502": "Samsung Galaxy Core Plus (SM-G3500)",
"SM-N9000": "Samsung Galaxy Note 3",
"SM-N9002": "Samsung Galaxy Note 3",
"SM-N9005": "Samsung Galaxy Note 3",
"SM-V700": "Samsung Galaxy Gear",
"GT-S7272": "Samsung Galaxy Ace 3 (GT-S7270)[20]",
"GT-S7275": "Samsung Galaxy Ace 3 (GT-S7270)[20]",
"GT-S5312": "Samsung Galaxy Pocket Neo (GT-S5310)",
"GT-S5282": "Samsung Galaxy Star (GT-S5280)",
"GT-S5283": "Samsung Galaxy Star (GT-S5280)",
"GT-i8262D": "Samsung Galaxy Core (GT-S8262)",
"Galaxy Grand Quattro": "Samsung Galaxy Win (GT-I8550)",
"GT-I9150": "Samsung Galaxy Mega",
"GT-I9152": "Samsung Galaxy Mega",
"GT-I9200": "Samsung Galaxy Mega",
"GT-I9205": "Samsung Galaxy Mega",
"GT-S6810P": "Samsung Galaxy Fame (GT-S6810)",
"GT-I9505": "Samsung Galaxy S4 (GT-I9500)",
"GT-I9506": "Samsung Galaxy S4 (GT-I9500)",
"GT-S6312": "Samsung Galaxy Young (GT-S6310)",
"GT-I9082": "Samsung Galaxy Grand (GT-I9080)",
"SGH-I437": "Samsung Galaxy Express",
"GT-N7100": "Samsung Galaxy Note II",
"GT-N7102": "Samsung Galaxy Note II",
"GT-N7105": "Samsung Galaxy Note II",
"GT-B5512": "Samsung Galaxy Y Pro DUOS (GT-B5510)[33]",
"GT-I5700": "Samsung Galaxy Spica[67]",
"GT-I7500": "Samsung Galaxy[68]",
# OPPO Devices
'CPH1911': 'OPPO F11',
    'CPH1909': 'OPPO A5s',
'CPH1913': 'OPPO F11',
'CPH1931': 'OPPO A5 2020',
'CPH1933': 'OPPO A5 2020',
'CPH1937': 'OPPO A9 2020',
'CPH1969': 'OPPO F11 Pro',
'CPH1989': 'OPPO Reno2 F',
'CPH2001': 'OPPO F15',
'CPH2015': 'OPPO A31',
'CPH2023': 'OPPO Find X2',
'CPH2035': 'OPPO Reno3 Pro',
'CPH2061': 'OPPO A52',
'CPH2071': 'OPPO A11k',
'CPH2077': 'OPPO A12',
'CPH2083': 'OPPO A11k',
'CPH2109': 'OPPO Reno4 Pro',
'CPH2127': 'OPPO A53',
'CPH2137': 'OPPO A33',
'CPH2179': 'OPPO A15s',
'CPH2185': 'OPPO A15',
'CPH2201': 'OPPO Reno5 Pro 5G',
'CPH2213': 'OPPO F19 Pro+',
'CPH2219': 'OPPO F19',
'CPH2239': 'OPPO A54',
    'CPH2249': 'OPPO Reno6 Pro 5G',
'CPH2251': 'OPPO Reno6 5G',
'CPH2263': 'OPPO A74 5G',
'CPH2269': 'OPPO A16',
'CPH2285': 'OPPO F19 Pro',
'CPH2293': 'OPPO Reno7 Pro 5G',
'CPH2321': 'OPPO A53s 5G',
'CPH2325': 'OPPO A55',
'CPH2349': 'OPPO A16k',
'CPH2371': 'OPPO Reno7 5G',
'Not Available': 'OPPO F17',
    # Huawei / Honor Devices
"YAL-TL00": "Honor 20",
"YAL-L71": "Huawei nova 5T",
"YAL-L61": "Huawei nova 5T",
"YAL-L41": "Honor 20 Pro",
"YAL-L21": "Huawei nova 5T",
"YAL-AL50": "Honor 20S",
"YAL-AL10": "Honor 20 Pro",
"YAL-AL00I": "Honor 20",
"YAL-AL00": "Honor 20",
"Y635-TL00": "Huawei Y635",
"Y635-L21": "Huawei Y635",
"Y635-L03": "Huawei Y635",
"Y635-L02": "Huawei Y635",
"Y635-L01": "Huawei Y635",
"Y635-CL00": "Huawei Y635",
"Y625-U51": "Huawei Y625",
"Y625-U43": "Huawei Y625",
"Y625-U32": "Huawei Y625",
"Y625-U21": "Huawei Y625",
"Y625-U13": "Huawei Y625",
"Y610-U00": "Huawei Y610-U00",
"Y600-U40": "Huawei Ascend Y600",
"Y600-U351": "Huawei Ascend Y600",
"Y600-U20": "Huawei Ascend Y600",
"Y600-U151": "Huawei Ascend Y600",
"Y600-U00": "Huawei Ascend Y600",
"Y560-U23": "Huawei Y560",
"Y560-U02": "Huawei Y560",
"Y560-L23": "Huawei Y560",
"Y560-L03": "Huawei Y560",
"Y560-L02": "Huawei Y560",
"Y560-L01": "Huawei Y560",
"Y550-L03": "Huawei Ascend Y550",
"Y550-L02": "Huawei Ascend Y550",
"Y550-L01": "Huawei Ascend Y550",
"Y541-U02": "Huawei Y541",
"Y540-U01": "Huawei Ascend Y540",
"Y538": "Huawei Union Y538",
"Y536-A1": "Huawei Y536",
"Y530-U051": "Huawei Y530",
"Y530-U00": "Huawei Y530",
"Y520-U33": "Huawei Ascend Y520",
"Y520-U22": "Huawei Ascend Y520",
"Y520-U12": "Huawei Ascend Y520",
"Y520-U03": "Huawei Ascend Y520",
"Y511-U30": "Huawei Ascend Y511",
"Y511-U251": "Huawei Ascend Y511",
"Y511-U10": "Huawei Ascend Y511",
"Y511-U00": "Huawei Y511",
"Y360-U93": "Huawei Y3 lite",
"Y360-U82": "Huawei Y3 Lite",
"Y360-U61": "Huawei Y360",
"Y360-U31": "Huawei Y360",
"Y360-U23": "Huawei Y360",
"Y360-U03": "Huawei Y360",
"Y340-U081": "Huawei Y340",
"Y336-U02": "Huawei Y336",
"Y330-U17": "Huawei Ascend Y330",
"Y330-U15": "Huawei Ascend Y330",
"Y330-U11": "Huawei Ascend Y330",
"Y330-U07": "Huawei Ascend Y330",
"Y330-U05": "Huawei Ascend Y330",
"Y330-U01": "Huawei Ascend Y330",
"Y321-U051": "Huawei Ascend Y321",
"Y320-U351": "Huawei Ascend Y320",
"Y320-U30": "Huawei Ascend Y320",
"Y320-U151": "Huawei Ascend Y320",
"Y320-U10": "Huawei Ascend Y320",
"Y320-U01": "Huawei Ascend Y320",
"Y300-0151": "Huawei Ascend Y300",
"Y300-0100": "Huawei Ascend Y300",
"Y300-0000": "Huawei Ascend Y300",
"Y221-U33": "Huawei Ascend Y221",
"Y221-U22": "Huawei Ascend Y221",
"Y221-U12": "Huawei Ascend Y221",
"Y221-U03": "Huawei Ascend Y221",
"Y220-U10": "Huawei Ascend Y220",
"Y220-U05": "Huawei Ascend Y220",
"Y220-U00": "Huawei Ascend Y220",
"Y210-0200": "Huawei Ascend Y210",
"Y210-0151": "Huawei Ascend Y210",
"WLZ-AN00": "Huawei nova 6 5G",
"WLZ-AL10": "Huawei nova 6",
"WKG-TN00": "Huawei Enjoy 20 SE",
"WKG-L29": "Huawei Enjoy 20 5G",
"WKG-L09": "Huawei Enjoy 20 5G",
"WKG-AN00": "Huawei Enjoy 20 5G",
"WAS-TL10": "Huawei P10 Lite Dual",
"WAS-LX3": "Huawei P10 Lite",
"WAS-LX2": "Huawei P10 Lite",
"WAS-LX1": "Huawei P10 Lite",
"WAS-L23": "Huawei P10 Lite",
"WAS-L22J": "Huawei WAS-L22J",
"WAS-L22": "Huawei P10 Lite",
"WAS-L21": "Huawei P10 Lite",
"WAS-L03": "Huawei P10 Lite",
"WAS-L02": "Huawei P10 Lite",
"WAS-L01": "Huawei P10 Lite",
"WAS-AL00": "Huawei Nova Youth Dual",
"Warsaw-LX2": "Huawei P10",
"Warsaw-LX1": "Huawei Warsaw-LX1",
"Warsaw-L23": "Huawei P10",
"Warsaw-L22": "Huawei P10",
"Warsaw-L21": "Huawei P10",
"Warsaw-L03": "Huawei Warsaw-L03",
"Warsaw-L02": "Huawei Warsaw-L02",
"W1-U00": "Huawei W1",
"VTR-TL00": "Huawei P10",
"VTR-L29": "Huawei P10",
"VTR-L09": "Huawei P10",
"VTR-AL00": "Huawei P10",
"VRD-W10": "Huawei MediaPad M6 Turbo 8.4",
"VRD-W09": "Huawei MediaPad M6 Turbo 8.4",
"VRD-AL10": "Huawei MediaPad M6 Turbo 8.4",
"VRD-AL09": "Huawei MediaPad M6 Turbo 8.4",
"VOG-TL00": "Huawei P30 Pro",
"VOG-L29": "Huawei P30 Pro",
"VOG-L09": "Huawei P30 Pro",
"VOG-L04": "Huawei P30 Pro",
"VOG-AL10": "Huawei P30 Pro",
"VOG-AL00": "Huawei Y6 Pro",
"VNS-TL00": "Huawei G9 Lite",
"VNS-L62": "Huawei P9 Lite",
"VNS-L53": "Huawei P9 lite",
"VNS-L52C": "Huawei VNS-L52C",
"VNS-L31": "Huawei P9 Lite",
"VNS-L23": "Huawei P9 lite",
"VNS-L22": "Huawei P9 Lite",
"VNS-L21": "Huawei P9 Lite",
"VNS-DL00": "Huawei P9",
"VNS-AL00": "Huawei G9 Lite",
"VKY-TL00": "Huawei P10 Plus",
"VKY-L29": "Huawei P10 Plus",
"VKY-L09": "Huawei P10 Plus",
"VKY-AL00": "Huawei P10 Plus",
"VIE-L29": "Huawei P9 Plus",
"VIE-L09": "Huawei P9 Plus",
"VIE-C00": "Huawei P9 Plus",
"VIE-AL10": "Huawei P9 Plus",
"Victoria-L09": "Huawei P10",
"Vicky-L29": "Huawei P10 Plus",
"Vicky-L09": "Huawei P10 Plus",
"VEN-L22": "Huawei Honor 8 Smart",
"VCE-TL00": "Huawei Nova 4",
"VCE-AL00": "Huawei Nova 4",
"U9510E": "Huawei Ascend D1",
"U9508": "Huawei Honor 2",
"U9202L-1": "Huawei Ascend P1 LTE",
"U9200-1": "Huawei Ascend P1",
"TRT-TL10": "Huawei Y7 Prime",
"TRT-LX3": "Huawei Y7",
"TRT-LX1": "Huawei Y7",
"TRT-LX": "Huawei Y7 Prime",
"TRT-L53D": "Huawei Y7 Prime",
"TRT-L53": "Huawei Y7 Prime",
"TRT-L21A": "Huawei Y7 Prime",
"TRT-L21": "Huawei Y7",
"TRT-L03": "Huawei Y7",
"TRT-L02": "Huawei Y7",
"TRT-L01": "Huawei Y7",
"TRT-AL00": "Huawei Enjoy 7 Plus",
"Toronto-L23": "Huawei Y7",
"Toronto-L22": "Huawei Toronto-L22",
"Toronto-L21": "Huawei Y7",
"Toronto-L02": "Huawei Toronto-L02",
"Toronto-AL00": "Huawei Toronto-AL00",
"TNY-TL00": "Huawei Honor Magic 2",
"TNY-AL10": "Honor Magic 2",
"TNY-AL00": "Honor Magic 2",
"TNNH-AN00": "Honor Play4",
"TNN-AN00": "Huawei Enjoy 20s",
"TIT-U02": "Huawei Y6 Pro",
"TIT-L01": "Huawei Y6 Pro",
"TIT-CL10": "Huawei Enjoy 5",
"TIT-CL00": "Huawei Enjoy 5",
"TIT-AL00": "Huawei Y6 Pro",
"TET-AN00": "Huawei Mate X2",
"TEL-AN10": "Honor X10 5G",
"TEL-AN00A": "Honor X10 5G",
"TEL-AN00": "Honor X10 5G",
"TAS-TL00": "Huawei Mate 30",
"TAS-L29": "Huawei Mate 30",
"TAS-AN00": "Huawei Mate 30 5G",
"TAS-AL00": "Huawei Mate 30",
"TAH-N29M": "Huawei Mate Xs",
"TAH-AN00M": "Huawei Mate X",
"TAH-AN00": "Huawei Mate X",
"TAG-TL00": "Huawei Enjoy 5s",
"TAG-L32": "Huawei GR3",
"TAG-L23": "Huawei GR3",
"TAG-L22": "Huawei GR3",
"TAG-L21": "Huawei GR3",
"TAG-L13": "Huawei GR3",
"TAG-L03": "Huawei GR3",
"TAG-L01": "Huawei P8 Lite Smart",
"TAG-CL00": "Huawei Enjoy 5S",
"T1-A22L": "Huawei Mediapad T1",
"T1-A21w": "Huawei MediaPad T1 10",
"T1-A21L": "Huawei MediaPad T1 10",
"T1-821L": "Huawei MediaPad T1 8.0",
"T1-702u": "Huawei MediaPad T1 7.0",
"T1-702": "Huawei MediaPad T1 7.0",
"T1-701w": "Huawei MediaPad T1 7.0",
"T1-701ua": "Huawei MediaPad T1 7.0",
"T1-701u": "Huawei MediaPad T1 7.0",
"T1-701": "Huawei MediaPad T1 7.0",
"STK-TL00": "Huawei Enjoy 10 Plus",
"STK-L23BHN": "Huawei Y9 Prime (2019)",
"STK-L22HN": "Huawei Y9 Prime (2019)",
"STK-L22DV": "Huawei Y9 Prime (2019)",
"STK-L22": "Huawei Y9 Prime (2019)",
"STK-L21VHN": "Huawei Y9 Prime (2019)",
"STK-L21UDV": "Huawei Y9 Prime (2019)",
"STK-L21MDV": "Huawei Y9 Prime (2019)",
"STK-L21M": "Huawei Y9 Prime (2019)",
"STK-L21HN": "Huawei Y9 Prime (2019)",
"STK-L21": "Huawei Y9 Prime (2019)",
"STK-L03DV": "Huawei P Smart Z",
"STK-L03B": "Huawei P Smart Z",
"STK-L01MDV": "Huawei Y9 Prime (2019)",
"STK-L01M": "Huawei P Smart Z",
"STK-AL00": "Huawei P Smart Z",
"STF-TL10": "Huawei Honor 9",
"STF-L09S": "Huawei Honor 9",
"STF-L09": "Huawei Honor 9",
"STF-AL10": "Huawei Honor 9",
"STF-AL00": "Huawei Honor 9",
"SPN-AL10": "Huawei nova 5z",
"SPN-AL00": "Huawei nova 5z",
"Sophia-L12": "Huawei Ascend P7",
"Sophia-L11": "Huawei Ascend P7",
"Sophia-L10": "Huawei Ascend P7",
"Sophia-L09": "Huawei Ascend P7",
"Sophia-L07": "Huawei Ascend P7",
"Sophia-L00": "Huawei Ascend P7",
"SNE-L01": "Huawei Mate 20 lite",
"SLA-L22": "Huawei P9 Lite Mini",
"SLA-L03": "Huawei Y6 Pro 2017",
"SLA-L02": "Huawei Y6 Pro 2017",
"SLA-AL00": "Huawei Enjoy 7",
"SHT-W09": "Huawei MediaPad M5 8.4",
"SHT-AL09": "Huawei MediaPad M5 8.4",
"Selina-L03": "Huawei Y6 Pro 2017",
"SEA-AL10": "Huawei nova 5 Pro",
"SEA-AL00": "Huawei nova 5",
"SCMR-W09": "Huawei MatePad 10.8",
"SCMR-AL09": "Huawei MatePad 10.8",
"SCL-U31": "Huawei Y6",
"SCL-U23": "Huawei Y6",
"SCL-L32": "Huawei Y6",
"SCL-L21": "Huawei Y6",
"SCL-L04": "Huawei Y6",
"SCL-L03": "Huawei Y6",
"SCL-L02": "Huawei Y6",
"SCL-L01": "Huawei Y6",
"SCL-CL00": "Huawei Honor 4A",
"SCL-AL00": "Huawei Honor 4A",
"SCC-U21": "Huawei Y6",
"SC-CL00": "Huawei Ascend GX1",
"S8-701w": "Huawei MediaPad T1 8.0",
"S8-701u": "Huawei MediaPad M1 8.0",
"S8-306L": "Huawei MediaPad M1 8.0",
"S8-303L": "Huawei MediaPad M1",
"S8-301w": "Huawei MediaPad M1 8.0",
"S8-301u": "Huawei MediaPad M1 8.0",
"S8-301L": "Huawei MediaPad M1 8.0",
"S7-931w": "Huawei MediaPad 7 Lite",
"S7-931u": "Huawei MediaPad 7 Lite",
"S7-722u": "Huawei MediaPad 7 Youth 2",
"S7-721w": "Huawei MediaPad 7 Youth 2",
"S7-721u": "Huawei MediaPad 7 Youth 2",
"S7-721g": "Huawei MediaPad 7 Youth 2",
"S7-701w": "Huawei MediaPad 7 Youth",
"S7-701u": "Huawei MediaPad 7 Youth",
"S7-601w": "Huawei MediaPad 7 Vogue",
"S7-601u": "Huawei MediaPad 7 Vogue",
"S7-301u": "Huawei MediaPad 7 Vogue",
"S7-201u": "Huawei IDEOS S7 Slim",
"S10-232L": "Huawei MediaPad 10 Link Plus",
"S10-231w": "Huawei MediaPad10 Link",
"S10-231u": "Huawei MediaPad 10 Link Plus",
"S10-231L": "Huawei MediaPad 10 Link",
"S10-201u": "Huawei MediaPad 10 Link",
"S10-101w": "Huawei MediaPad 10 FHD",
"S10-101u": "Huawei MediaPad 10 FHD",
"RVL-AL09": "Honor Note 10",
"RNE-L23": "Huawei Mate 10 Lite",
"RNE-L22": "Huawei Nova 2i",
"RNE-L21": "Huawei Mate 10 Lite",
"RNE-L03": "Huawei Mate 10 Lite",
"RNE-L02": "Huawei Nova 2i",
"RNE-L01": "Huawei Mate 10 Lite",
"RNE-AL00": "Huawei Maimang 6",
"RIO-UL00": "Huawei G7 Plus",
"RIO-TL00": "Huawei G7 Plus",
"RIO-L33": "Huawei G8",
"RIO-L23": "Huawei G8",
"RIO-L11": "Huawei G8",
"RIO-L03": "Huawei GX8",
"RIO-L02": "Huawei G8",
"RIO-L01,RIO-L11": "Huawei G8",
"RIO-L01": "Huawei G8",
"RIO-CL00": "Huawei Maimang 4",
"RIO-AL00": "Huawei Maimang 4",
"Rhone-L21": "Huawei Mate 10 Lite",
"Rhone-L03": "Huawei Mate 10 Lite",
"Rhone-L01": "Huawei Mate 10 Lite",
"Prague-TL00": "Huawei Prague-TL00",
"Prague-L23": "Huawei P8 Lite 2017",
"Prague-L22": "Huawei P8 Lite 2017",
"Prague-L21": "Huawei P8 Lite 2017",
"Prague-L03": "Huawei Prague-L03",
"PRA-TL10": "Huawei Honor 8 Lite",
"PRA-LX3": "Huawei P9 Lite 2017",
"PRA-LX2": "Huawei Nova Lite",
"PRA-LX1": "Huawei P8 Lite 2017",
"PRA-LA1": "Huawei Honor 8 Lite",
"PRA-L31": "Huawei P8 Lite 2017",
"PRA-L22": "Huawei P8 Lite 2017",
"PRA-L21": "Huawei P8 Lite Dual Sim 2017",
"PRA-L11": "Huawei P8 Lite 2017",
"PRA-L03": "Huawei P8 Lite 2017",
"PRA-L02": "Huawei Nova Lite",
"PRA-L01": "Huawei P8 Lite 2017",
"PRA-AL00X": "Huawei Honor 8 Lite",
"PRA-AL00": "Huawei Honor 8 Lite",
"PPAH-TL20": "Huawei P smart 2021",
"PPAH-L22": "Huawei P smart 2021",
"PPAH-L21": "Huawei P smart 2021",
"PPAH-L02": "Huawei P smart 2021",
"PPAH-AL40": "Huawei P smart 2021",
"PPAH-AL20": "Huawei P smart 2021",
"POT-TL00": "Huawei Enjoy 9s",
"POT-LX3": "Huawei P Smart 2019",
"POT-LX2J": "Huawei Nova Lite 3",
"POT-LX1A": "Huawei P Smart 2020",
"POT-LX1": "Huawei P Smart 2019",
"POT-L41B": "Huawei P Smart S",
"POT-L21RU": "Huawei P smart 2019",
"POT-L21": "Huawei P smart 2019",
"POT-L01": "Huawei P smart 2019",
"POT-AL10C": "Huawei enjoy 9s",
"POT-AL00a": "Huawei Enjoy 9S",
"POT-AL00": "Huawei P Smart 2019",
"PLK-UL00IN": "Huawei Honor 7",
"PLK-UL00": "Huawei Honor 7",
"PLK-TL01H": "Huawei Honor 7",
"PLK-TL00": "Huawei Honor 7",
"PLK-L01": "Huawei Honor 7",
"PLK-CL00": "Huawei Honor 7",
"PLK-AL10": "Huawei Honor 7",
"PLE-703L": "Huawei MediaPad M2 Lite",
"PLE-701L": "Huawei MediaPad T2 7.0",
"Pine-UL00": "Huawei Honor 6 plus",
"Pine-TL10": "Huawei Honor 6 Plus",
"Pine-L04": "Huawei Honor 6 Plus",
"Pine-L02": "Huawei Honor 6 Plus",
"Pine-L00": "Huawei Honor 6 Plus",
"PIC-TL00": "Huawei Nova 2",
"PIC-LX9": "Huawei Nova 2",
"PIC-L29": "Huawei Nova 2",
"PIC-L09": "Huawei Nova 2",
"PIC-AL00": "Huawei Nova 2",
"PE-UL00": "Huawei Honor 6 Plus",
"PE-TL20": "Huawei Honor 6 Plus",
"PE-TL10": "Huawei Honor 6 Plus",
"PE-TL00M": "Huawei Honor 6 Plus",
"PE-CL00": "Huawei Honor 6 Plus",
"PCT-TL10": "Honor View 20",
"PCT-L29D": "Honor View 20",
"PCT-L29": "Honor View 20",
"PCT-AL10D": "Honor View 20",
"PCT-AL10": "Honor View 20",
"Paris-L09A": "Huawei Nova 3",
"PAR-TL20": "Huawei Nova 3",
"PAR-TL00": "Huawei nova 3",
"PAR-AL00": "Huawei nova 3",
"P7-L10": "Huawei Ascend P7",
"P7-L09": "Huawei Ascend P7",
"P7-L07": "Huawei Ascend P7",
"P7-L05": "Huawei Ascend P7",
"P6-U06": "Huawei Ascend P6",
"P6-S-U00": "Huawei Ascend P6 S",
"P2-6011": "Huawei Ascend P2",
"OXF-AN10L": "Honor V30 Pro",
"OXF-AN10": "Honor V30 Pro",
"OXF-AN00L": "Honor V30",
"OXF-AN00": "Honor V30",
"OCE-AN50": "Huawei Mate 40E",
"OCE-AN10": "Huawei Mate 40E",
"NXT-TL00": "Huawei Mate 8",
"NXT-L29B": "Huawei Ascend Mate8",
"NXT-L29A": "Huawei Ascend Mate8",
"NXT-L29": "Huawei Mate 8",
"NXT-L09A": "Huawei Ascend Mate8",
"NXT-L09": "Huawei Mate 8",
"NXT-DL00": "Huawei Mate 8",
"NXT-CL00": "Huawei Mate 8",
"NXT-C00": "Huawei Mate 8",
"NXT-AL10": "Huawei Mate 8",
"MAR-L01MEA": "Huawei P30 lite",
"MAR-L01B": "Huawei P30 lite",
"MAR-L01A": "Huawei P30 lite",
"MAR-AL00": "Huawei nova 4e",
"Madrid-L21": "Huawei Y6 (2019)",
"M2-A01w": "Huawei MediaPad M2 10.0",
"M2-A01L": "Huawei MediaPad M2 10.0",
"M2-803L": "Huawei MediaPad M2 8.0",
"M2-802L": "Huawei MediaPad M2 8.0",
"M2-801w": "Huawei MediaPad M2 8.0",
"M2-801L": "Huawei MediaPad M2 8.0",
"LYO-L21": "Huawei Honor 5A",
"LYO-L02": "Huawei Y6 II",
"LYO-L01": "Huawei Y6 II",
"LYA-TL00L": "Huawei Mate 20 Pro",
"LYA-TL00": "Huawei Mate 20 Pro",
"LYA-L29": "Huawei Mate 20 Pro",
"LYA-L09": "Huawei Mate 20 Pro",
"LYA-AL10": "Huawei Mate 20 Pro",
"LYA-AL00P": "Huawei Mate 20 Pro",
"LYA-AL00L": "Huawei Mate 20 Pro",
"LYA-AL00": "Huawei Mate 20 Pro",
"LUA-U23": "Huawei Y3 II",
"LUA-U22": "Huawei Y3 II",
"LUA-U03": "Huawei Y3 II",
"LUA-U02": "Huawei Y3 II",
"LUA-L23": "Huawei Y3 II",
"LUA-L22HN": "Huawei Honor Bee 2",
"LUA-L22": "Huawei Y3 II",
"LUA-L21": "Huawei Y3 II",
"LUA-L13": "Huawei Y3 II",
"LUA-L03": "Huawei Y3 II",
"LUA-L02": "Huawei Y3 II",
"LUA-L01": "Huawei Y3 II",
"LRA-L21B": "Honor 30i",
"LRA-AL00": "Honor 20 lite (China)",
"LON-L29D": "Huawei Mate 9 Pro",
"LON-L29C": "Huawei Mate 9 Pro",
"LON-L29": "Huawei Mate 9 Pro",
"LON-AL00": "Huawei Mate 9 Pro",
"LLD-TL10": "Honor 9 Lite",
"LLD-L31": "Huawei Honor 9 Lite",
"LLD-L21": "Huawei Honor 9 Lite",
"LLD-AL30": "Honor 9N (9i)",
"LLD-AL20": "Honor 9N (9i)",
"LLD-AL10": "Huawei Honor 9 Lite",
"LLD-AL00": "Huawei Honor 9 Lite",
"LIO-TL00": "Huawei Mate 30 Pro",
"LIO-N29": "Huawei Mate 30 RS Porsche Design",
"LIO-L29": "Huawei Mate 30 Pro 5G",
"LIO-AN00P": "Huawei Mate 30 RS Porsche Design",
"LIO-AN00M": "Huawei Mate 30 Pro",
"LIO-AN00": "Huawei Mate 30 Pro 5G",
"LIO-AL00": "Huawei Mate 30 Pro",
"LDN-TL10": "Huawei Y7 Prime 2018",
"LDN-TL00": "Huawei Enjoy 8",
"LDN-LX3": "Huawei Y7 2018",
"LDN-LX2": "Huawei Y7 Prime 2018",
"LDN-L22": "Huawei nova 2 lite",
"LDN-L21": "Huawei Y7 2018",
"LDN-L03": "Huawei Y7 2018",
"LDN-L01": "Huawei Y7 2018",
"LDN-AL00": "Huawei Enjoy 8",
"KSA-L29": "Honor 8S",
"KSA-L22": "Honor 8S",
"KSA-L09": "Honor 8S",
"KSA-AL10": "Honor 8S",
"KSA-AL00": "Honor 8S",
"KRJ-W09": "Honor V6",
"KRJ-AN00": "Honor V6",
"KOB2-W09B": "Huawei MatePad T8",
"KOB2-W09": "Huawei MatePad T8",
"KOB2-L09B": "Huawei MatePad T8",
"KOB2-L09": "Huawei MatePad T8",
"KOB-W09": "HUAWEI MediaPad T3",
"KOB-L09": "Huawei Mediapad T3",
"KNT-UL10": "Huawei Honor V8",
"KNT-TL10": "Huawei Honor V8",
"KNT-C00": "Huawei Honor V8",
"KNT-AL20": "Huawei Honor V8",
"KNT-AL10": "Huawei Honor V8",
"KKG-TN00": "Honor X10 Max 5G",
"KKG-AN00": "Honor X10 Max 5G",
"KIW-UL00": "Huawei Honor 5X",
"KIW-TL00": "Huawei Honor 5X",
"KIW-L24": "Huawei Honor 5X",
"KIW-L22": "Huawei Honor 5X",
"KIW-L21": "Huawei HONOR 5X",
"KIW-CL00": "Huawei Honor 5X",
"KIW-AL10": "Huawei Honor 5X",
"KII-L33": "Huawei GR5",
"KII-L23": "Huawei GR5",
"KII-L22": "Huawei GR5",
"KII-L21": "Huawei GR5",
"KII-L05": "Huawei GR5",
"JSN-TL00": "Honor 8X",
"JSN-L22X": "Honor 8X",
"JSN-L21X": "Honor 8X",
"JSN-L21": "Honor 8X",
"JSN-AL00": "Honor 8X",
"JSC-AN00A": "Huawei nova 8 SE",
"JSC-AN00": "Huawei nova 8 SE",
"JNY-L22": "Huawei nova 7i",
"JNY-L21": "Huawei nova 7i",
"JNY-L01": "Huawei nova 7i",
"JNY-AL10": "Huawei nova 6 SE",
"JMM-TL00": "Huawei Honor 6C Pro",
"JMM-L22": "Huawei Honor 6C Pro",
"JMM-AL00": "Huawei Honor 6C Pro",
"JKM-TL00": "Huawei Y9 (2019)",
"JKM-LX3": "Huawei Y9 (2019)",
"JKM-LX2": "Huawei Y9 (2019)",
"JKM-LX1": "Huawei Y9 (2019)",
"JKM-L21X": "Huawei Y9 (2019)",
"JKM-L21": "Huawei Y9 (2019)",
"JKM-L01X": "Huawei Y9 (2019)",
"JKM-AL20": "Huawei Y9 (2019)",
"JKM-AL10": "Huawei Y9 (2019)",
"JKM-AL00": "Huawei Y9 (2019)",
"Jimmy-TL00": "Huawei Jimmy TL00",
"Jimmy-AL00": "Huawei Jimmy-AL00",
"JER-TN20": "Huawei nova 7 Pro 5G",
"JER-TN10": "Huawei nova 7 Pro 5G",
"JER-AN20": "Huawei nova 7 Pro 5G",
"JER-AN10": "Huawei Nova 7 Pro",
"JEF-TN20": "Huawei nova 7 5G",
"JEF-TN00": "Huawei nova 7 5G",
"JEF-AN20": "Huawei nova 7 5G",
"JEF-AN00": "Huawei Nova 7 Pro",
"JDN2-W09HN": "Honor Tab 5",
"JDN2-W09": "Honor Tab 5",
"JDN2-L09": "Huawei MediaPad M5 Lite 8",
"JDN2-AL50HN": "Huawei MediaPad M5 lite",
"JDN2-AL50": "Huawei MediaPad M5 lite",
"JDN2-AL00HN": "Honor Pad 5 8",
"JDN2-AL00": "Honor Pad 5 8",
"JDN-W09": "Huawei Honor Pad 2",
"JDN-L01": "Huawei MediaPad T2 8.0",
"JDN-AL00": "Huawei Honor Pad 2",
"Jazz-TL10": "Huawei Ascend Mate 7",
"Jazz-L11": "Huawei Ascend Mate 7",
"Jazz-L09": "Huawei Ascend Mate 7",
"Jazz-J1": "Huawei Ascend Mate 7",
"JAT-TL00": "Huawei Honor 8A",
"JAT-L41HW": "Honor 8A Pro",
"JAT-L41": "Honor 8A Pro",
"JAT-L29HW": "Honor Play 8A",
"JAT-L29": "Honor Play 8A",
"JAT-L23HW": "Honor Play 8A",
"JAT-L21AHW": "Honor 8A Pro",
"JAT-AL00": "Honor Play 8A",
"Jakarta-LGRP2": "Huawei Y9 (2019)",
"Jackman-L22": "Huawei Y9 (2019)",
"INE-TL00": "Huawei Nova 3i",
"INE-LX2": "Huawei Nova 3i",
"INE-LX1": "Huawei Nova 3i",
"INE-LGRP1": "Huawei Nova 3i",
"INE-L22rr": "Huawei Nova 3i",
"INE-L22": "Huawei Nova 3i",
"INE-L21": "Huawei Nova 3i",
"INE-AL00": "Huawei nova 3i",
"HWI-TL00": "Huawei Nova 2S",
"HWI-LGRP1": "Huawei Nova 2S",
"HWI-AL00": "Huawei Nova 2s",
"HRY-TL00": "Honor 10 Lite",
"HRY-L21T": "Honor 10 Lite",
"HRY-L21D": "Honor 10 Lite",
"HRY-L21": "Honor 10 Lite",
"HRY-LX1": "Honor 10 Lite",
"HRY-LX2": "Honor 10 Lite",
"HRY-AL00a": "Honor 10 Lite",
"HRY-LX1MEB": "Honor 10 Lite",
"HRY-AL00TA": "Honor 20i",
"HRY-AL00T": "Honor 10 Lite",
"HRY-AL00A": "Honor 10 Lite",
"HRY-AL00": "Honor 10 Lite",
"Holly-U19": "Huawei Holly",
"Holly-U10": "Huawei Holly",
"Holly-U00": "Huawei Honor 3C",
"HMA-TL00": "Huawei Mate 20",
"HMA-L29": "Huawei Mate 20",
"HMA-L09": "Huawei Mate 20",
"HMA-AL00": "Huawei Mate 20",
"HLK-L42": "Honor 9X Pro",
"HLK-L41": "Honor 9X Pro",
"HLK-AL10": "Honor 9X",
"HLK-AL00A": "Honor 9X",
"HLK-AL00": "Honor 9X (China)",
"HDN-W09": "Huawei Honor",
"H60-L12": "Huawei Honor 6",
"H60-L04": "Huawei Honor 6",
"H60-L03": "Huawei Honor 6",
"H60-L02": "Huawei Honor 6",
"H60-L01": "Huawei Honor 6",
"H60-J1": "Huawei Honor 6",
"H30-U10": "Huawei 3C",
"H30-L02": "Huawei Honor 3C",
"H30-L01": "Huawei Honor 3C",
"GRA-UL10": "Huawei P8",
"GRA-UL00": "Huawei P8",
"GRA-TL00": "Huawei P8",
"GRA-L13": "Huawei P8",
"GRA-L09": "Huawei P8",
"GRA-L03": "Huawei P8",
"GRA-CL10": "Huawei P8",
"GRA-CL00": "Huawei P8 Standard Edition",
"GLK-TL00": "Huawei nova 5i",
"GLK-AL00": "Huawei nova 5i",
"GEM-703L": "HUAWEI Honor X2",
"GEM-703": "Huawei MediaPad X2",
"GEM-702L": "Huawei MediaPad X2",
"GEM-702": "Huawei MediaPad X2",
"GEM-701L": "Huawei MediaPad X2",
"GEM-701": "Huawei MediaPad X2",
"G760-TL00": "Huawei Ascend G7",
"G760-L03": "Huawei Ascend G7",
"G760-L01": "Huawei Ascend G7",
"G750-U10": "Huawei Honor 3X",
"G750-T20": "Huawei Honor 3X",
"G750-C00": "Huawei Honor 3X",
"G740-L00": "Huawei G740",
"G735-L23": "Huawei G Play",
"G735-L12": "Huawei G Play",
"G735-L03": "Huawei G Play",
"G730-U251": "Huawei G730",
"G730-U10": "Huawei G730",
"G700-U20": "Huawei Ascend G700",
"G700-U10": "Huawei Ascend G700",
"G7-L11": "Huawei Ascend G7",
"G7-L01": "Huawei Ascend G7",
"G630-U251": "Huawei G630",
"G630-U20": "Huawei G630",
"G630-U10": "Huawei G630",
"G630-U00": "Huawei G630",
"G629-UL00": "Huawei G629",
"G628-TL00": "Huawei Ascend G628",
"G620S-UL00": "Huawei Ascend G620s",
"G620S-L03": "Huawei Ascend G620s",
"G620S-L02": "Huawei Ascend G620s",
"G620S-L01": "Huawei Ascend G620s",
"G620-UL01": "Huawei G620",
"G620-L75": "Huawei Ascend G620s",
"G620-L72": "Huawei G620",
"G615-U10": "Huawei Ascend G615",
"G610-U20": "Huawei G610",
"G610-U15": "Huawei G610",
"G610-U00": "Huawei Ascend G6",
"G6-U251": "Huawei Ascend G6",
"G6-U10": "Huawei Ascend G6",
"G6-L33": "Huawei Ascend G6",
"G6-L22": "Huawei Ascend G6",
"G6-L11": "Huawei Ascend G6",
"G527-U081": "Huawei Ascend G527",
"G526-L33": "Huawei Ascend G526",
"G525-U00": "Huawei Ascend G525",
"G510-0251": "Huawei Ascend G510",
"G510-0200": "Huawei Ascend G510",
"G510-0100": "Huawei Ascend G510",
"FRLM-TN00": "Huawei Enjoy 20 SE",
"FRLM-L22": "Huawei Enjoy 20 SE",
"FRLM-L03": "Huawei Enjoy 20 SE",
"FRLM-AN00A": "Huawei Enjoy 20 SE",
"FRD-L19": "Huawei Honor 8",
"FRD-L14": "Huawei Honor 8",
"FRD-L09": "Huawei HONOR 8",
"FRD-L04": "Huawei Honor 8",
"FRD-L02": "Huawei HONOR 8",
"FRD-DL00": "Huawei MediaPad T2 10.0 Pro",
"FRD-C00": "Huawei Honor 8",
"FRD-AL10": "Huawei Honor 8",
"FRD-AL00": "Huawei Honor 8",
"FLA-TL10": "Huawei Y9 (2018)",
"FLA-AL20": "Huawei Y9 2018",
"FLA-AL10": "Huawei Y9 2018",
"FLA-AL00": "Huawei Y9 2018",
"Figo-L31": "Huawei P Smart",
"FIG-TL10": "Huawei Enjoy 7S Dual",
"FIG-TL00": "Huawei P smart",
"FIG-LX3": "Huawei P Smart",
"FIG-LX2": "Huawei P Smart",
"FIG-LX1": "Huawei P Smart Dual SIM",
"FIG-LA1": "Huawei P Smart",
"FIG-L31": "Huawei P Smart",
"FIG-L22": "Huawei P Smart",
"FIG-L21": "Huawei P Smart",
"FIG-L11": "Huawei P Smart",
"FIG-L03": "Huawei P Smart",
"FIG-L02": "Huawei P Smart",
"FIG-AL10": "Huawei Enjoy 7S",
"FIG-AL00": "Huawei P smart",
"FDR-A05": "Huawei MediaPad T2 10.0 Pro",
"FDR-A04": "Huawei MediaPad T2 10.0 Pro",
"FDR-A03L": "Huawei M2",
"FDR-A03": "Huawei MediaPad T2 10.0 Pro",
"FDR-A01w": "Huawei MediaPad T2 10.0 Pro",
"FDR-A01": "Huawei MediaPad T2 10.0 Pro",
"EVR-TL00": "Huawei Mate 20 X",
"EVR-N29": "Huawei Mate 20 X (5G)",
"EVR-L29": "Huawei Mate 20 X",
"EVR-AN00": "Huawei Mate 20 X (5G)",
"EVR-AL00": "Huawei Mate 20 X",
"EVA-L29": "Huawei P9",
"EVA-L19": "Huawei P9",
"EVA-L09": "Huawei P9",
"EVA-DL00": "Huawei P9",
"EVA-CL00": "Huawei P9",
"EVA-C00": "Huawei P9",
"EVA-AL10": "Huawei P9",
"EVA-AL00": "Huawei P9",
"EML-L29": "Huawei P20",
"EML-L09": "Huawei P20",
"EML-AL00": "Huawei P20",
"ELS-TN00": "Huawei P40 Pro",
"ELS-N39": "Huawei P40 Pro+",
"ELS-N29": "Huawei P40 Pro+",
"ELS-N04": "Huawei P40 Pro",
"ELS-AN10": "Huawei P40 Pro+",
"ELS-AN00": "Huawei P40 Pro",
"ELE-TL00": "Huawei P30",
"ELE-L29": "Huawei P30",
"ELE-L09": "Huawei P30",
"ELE-L04": "Huawei P30",
"ELE-AL00": "Huawei P30",
"EDI-AL10": "Huawei Honor Note 8",
"EDGE-U00": "Huawei Ascend P6",
"EDGE-C00": "Huawei Ascend P6",
"EBG-TN00": "Honor 30 Pro",
"EBG-N19": "Honor 30 Pro+",
"EBG-AN10": "Honor 30 Pro+",
"EBG-AN00": "Honor 30 Pro",
"DVCM-TN20": "",
"DVCM-AN20": "Huawei Enjoy 20 Pro",
"DVCM-AN00": "Huawei Enjoy 20 Pro",
"DUK-TL30": "Huawei Honor V9",
"DUK-L09": "Huawei Honor 8 Pro",
"DUK-AL30": "Huawei Honor V9",
"DUK-AL20": "Huawei Honor V9",
"DUB-LX3": "Huawei Y7 Prime 2019",
"DUB-LX1": "Huawei Y7 Prime 2019",
"DUB-L01": "Huawei Y7 2019",
"DUB-AL00": "Huawei Enjoy 9",
"DUA-L29": "Honor 9S",
"DRA-LX5": "Huawei Y5 Lite (2018)",
"DRA-L29": "Huawei Y5p",
"DRA-L21": "Huawei Y5 Prime 2018",
"DRA-L09": "Huawei Y5p",
"DNN-L29": "Honor 10X Lite",
"Diego-TL10": "Huawei Enjoy 6S",
"Diego-L23": "Huawei Diego-L23",
"Diego-L21": "Huawei Honor 6C",
"Diego-L03": "Huawei Diego-L03",
"Diego-L01": "Huawei Diego-L01",
"Diego-AL00": "Huawei Diego-AL00",
"Delhi-TL20": "Huawei Honor 6A",
"Delhi-L42": "Huawei Honor 6A",
"Delhi-L22": "Huawei Honor 6A",
"Delhi-AL10": "Huawei Honor 6A",
"DAV-703": "Huawei P8 MAX",
"DAV-702L": "Huawei P8 max",
"DAV-701L": "Huawei P8 max",
"D2-0082": "Huawei Ascend D2",
"CUN-U29": "Huawei Y5 II",
"CUN-TL00": "Huawei Honor 5",
"CUN-L33": "Huawei Y5 II",
"CUN-L23": "Huawei Y5 II",
"CUN-L22": "Huawei Y5 II",
"CUN-L21": "Huawei Y5 II",
"CUN-L03": "Huawei Y5 II",
"CUN-L02": "Huawei Y5 II",
"CUN-L01": "Huawei Y5 II",
"CUN-AL00": "Huawei Honor 5",
"CRR-UL20": "Huawei Mate S",
"CRR-UL00": "Huawei Mate S",
"CRR-TL00": "Huawei Mate S",
"CRR-L13": "Huawei Mate S",
"CRR-L09": "Huawei Mate S",
"CRR-CL20": "Huawei Mate S",
"CRR-CL00": "Huawei Mate S",
"CRO-UL00": "Huawei Y3 2017",
"CRO-L03": "Huawei Y3 2017",
"CRO-L02": "Huawei Y3 2017",
"CPN-W09": "Huawei M3 Lite",
"CPN-L09": "Huawei MediaPad M3 Lite",
"CPN-AL00": "Huawei M3 Lite",
"COR-TL10": "Honor Play",
"COR-AL10": "Honor Play",
"COR-AL00": "Honor Play",
"COL-TL10": "Huawei Honor 10",
"COL-TL00": "Huawei Honor 10",
"COL-L29": "Huawei Honor 10",
"COL-AL10": "Honor 10",
"CND-AN00": "Huawei nova 7 SE 5G Youth",
"CMR-W19": "Huawei MediaPad M5 Pro 10.8",
"CMR-W09TWN": "Huawei MediaPad M5",
"CMR-W09": "Huawei MediaPad M5 10.8",
"CMR-AL19": "Huawei MediaPad M5 Pro 10.8",
"CMR-AL09": "Huawei MediaPad M5 10.8",
"CM990": "Huawei CM990",
"CLT-TL00": "Huawei P20 Pro",
"CLT-L29": "Huawei P20 Pro Dual SIM",
"CLT-L09": "Huawei P20 Pro Dual SIM",
"CLT-L04": "Huawei P20 Pro Dual SIM",
"CLT-AL01": "Huawei P20 Pro Dual SIM",
"CLT-AL00": "Huawei P20 Pro Dual SIM",
"CHM-UL00": "Huawei Honor 4C",
"CHM-U01": "Huawei Honor 4C",
"CHM-TL00H": "Huawei Honor 4C",
"CHM-TL00": "Huawei Honor 4C",
"CHM-CL00": "Huawei Honor 4C",
"CHL-AL60CH": "Huawei nova 8 SE",
"CherryPlus-TL00": "Huawei Honor 4X",
"CherryPlus-L23": "Huawei Honor 4X",
"CherryPlus-L12": "Huawei Honor 4X LTE",
"CherryPlus-L11": "Huawei HONOR 4X",
"Cherry-L04": "Huawei Honor 4X",
"Cherry-CL20": "Huawei Honor 4X",
"Cherry-CL10": "Huawei Honor 4X",
"CHE2-L12": "Huawei Honor 4X",
"Che2-L11": "Huawei Honor 4X",
"CHE1-L04": "Huawei Honor 4X",
"CHE1-CL20": "Huawei Honor 4X",
"CHE1-CL10": "Huawei Honor 4X",
"CHE-TL00H": "Huawei Honor 4x",
"CHE-TL00": "Huawei Honor 4X",
"Che-L11": "Huawei Honor 4X",
"CHC-U23": "Huawei G Play Mini",
"CHC-U03": "Huawei G Play mini",
"CHC-U01": "Huawei G Play Mini",
"CDY-TN90": "Honor 30S",
"CDY-TN20": "Huawei nova 7 SE",
"CDY-TN00": "Huawei nova 7 SE",
"CDY-N29H": "Huawei nova 7 SE",
"CDY-N29B": "Huawei nova 7 SE",
"CDY-N29": "Huawei nova 7 SE",
"CDY-AN95": "Huawei nova 7 SE",
"CDY-AN90": "Honor 30S",
"CDY-AN20": "Huawei nova 7 SE",
"CDY-AN00": "Huawei nova 7 SE",
"CDL-AN50": "Huawei nova 7 SE",
"CAZ-TL20": "Huawei Nova",
"CAZ-TL10": "Huawei Nova",
"CAZ-AL10": "Huawei Nova",
"Cannes-L12": "Huawei Nova",
"Cannes-L11": "Huawei Nova",
"Cannes-L01": "Huawei Nova",
"Cannes-AL10": "Huawei Nova Cannes-AL10",
"CAN-L13": "Huawei Nova",
"CAN-L12": "Huawei Nova",
"CAN-L11": "Huawei nova",
"CAN-L03": "Huawei Nova",
"CAN-L01": "Huawei Nova",
"Cameron-W19": "Huawei MediaPad M5 Pro 10.8",
"CAM-UL00": "Huawei Honor 5A",
"CAM-TL00": "Huawei Honor 5A",
"CAM-L23": "Huawei Y6 II",
"CAM-L21": "Huawei Y6 II",
"CAM-L03": "Huawei Y6 II Compact",
"CAM-AL00": "Huawei Honor 5A",
"CairoGO-L22": "Huawei CairoGO-L22",
"CairoGO-L02": "Huawei Y3 2018",
"Cairo-U00": "Huawei Cairo-U00",
"Cairo-L23": "Huawei Cairo-L23",
"Cairo-L22": "Huawei Cairo-L22",
"Cairo-L03": "Huawei Cairo-L03",
"Cairo-L02": "Huawei Cairo-L02",
"CAG-L02": "Huawei Y3 2018",
"C8860V": "Huawei Honor",
"C8817E": "Huawei C8817E",
"C8817D": "Huawei Honor 6 Pro",
"C8816D": "Huawei C8816D",
"C8816": "Huawei C8816",
"C199s": "Huawei C199S",
"BZT3-W59": "Huawei C5 10.4",
"BZT3-W09": "",
"BZT3-AL00": "Honor 5c",
"BZT-W09": "Huawei MediaPad C5 10.1",
"BZD-W00": "Huawei MediaPad C3",
"BZD-AL00": "Huawei MediaPad C3",
"BZC-W00": "",
"BZC-AL00": "",
"BTV-W09": "Huawei M3",
"BTV-DL09": "Huawei MediaPad M3",
"BRQ-AN00CG": "Huawei nova 8 Pro 4G",
"BRQ-AN00": "Huawei nova 8 Pro 5G",
"BRQ-AL00": "Huawei nova 8 Pro 5G",
"Bond-L24": "Huawei Honor 7X",
"BOND-L21": "Huawei Honor 7X",
"BND-TL10": "Huawei Honor 7X",
"BND-L34": "Huawei Mate SE",
"BND-L31A": "Huawei Honor 7X",
"BND-L31": "Huawei Honor 7X",
"BND-L24A": "Huawei Honor 7x",
"BND-L21": "Huawei Honor 7X",
"BND-AL10": "Huawei Honor 7X",
"BND-AL00": "Huawei Honor 7X",
"BMH-TN10": "Honor 30",
"BMH-N19": "Honor 30",
"BMH-AN20": "Honor 30",
"BMH-AN10": "Honor 30",
"BLN-TL10": "Huawei Honor 6X",
"BLN-TL00": "Huawei Honor 6X",
"BLN-L24": "Huawei Honor 6X",
"BLN-L22HN": "Huawei Honor 6X",
"BLN-L22": "Huawei Honor 6X",
"BLN-L21": "Huawei Honor 6X",
"BLN-AL40": "Huawei Honor 6X",
"BLN-AL30": "Huawei Honor 6X",
"BLN-AL20": "Huawei Honor 6X",
"BLN-AL10": "Huawei Honor 6X",
"BLL-L23": "Huawei Mate 9 Lite",
"BLL-L22": "Huawei GR5 2017",
"BLL-L21": "Huawei GR5 2017",
"BLA-TL00": "Huawei Mate 10 Pro",
"BLA-L29": "Huawei Mate 10 Pro",
"BLA-L09": "Huawei Mate 10 pro",
"BLA-AL00": "Huawei Mate 10 pro",
"BLA-A09": "Huawei Mate 10 pro",
"BKL-TL10": "Huawei Honor View 10",
"BKL-L09": "Huawei Honor View 10 Global",
"BKL-L04": "Huawei Honor View 10",
"BKL-AL20": "Huawei Honor V10",
"BKL-AL00": "Huawei Honor V10",
"BKK-TL00": "Huawei Honor 8C",
"BKK-L22": "Huawei Honor 8C",
"BKK-L21": "Huawei Honor 8C",
"BKK-AL10": "Huawei Honor 8C",
"BKK-AL00": "Honor 8C",
"BGO-L03": "Huawei MediaPad T2 7.0",
"BGO-DL09": "Huawei MediaPad T2 7.0",
"BG2-W09": "Huawei MediaPad T3",
"BG2-U03": "Huawei MediaPad T3",
"BG2-U01": "Huawei MediaPad T3 7 3G",
"Berlin-L23": "Huawei Honor 6X",
"Berlin-L22": "Huawei GR5 2017",
"Berlin-L21HN": "Huawei Honor 6X",
"Berlin-L21": "Huawei Honor 6X",
"Berkeley-LGRP2": "Huawei Honor V10",
"Barca-L22": "Huawei Barca-L22",
"Barca-L21": "Huawei Nova 2 Plus",
"Barca-L03": "Huawei Nova 2 Plus",
"BAH3-W59": "Huawei MatePad 10.4",
"BAH3-W09": "Huawei MatePad 10.4",
"BAH3-L09": "Huawei MatePad 10.4",
"BAH3-AN10": "Huawei MatePad 5G",
"BAH3-AL00": "Huawei MatePad 10.4",
"BAH2-W19": "Huawei MediaPad M5 lite",
"BAH2-W09": "Huawei MediaPad M5 lite",
"BAH2-L09": "Huawei MediaPad M5 Lite",
"BAH2-AL10": "Huawei MediaPad M5 lite",
"BAH-W09": "Huawei M3 Lite",
"BAH-L09": "Huawei MediaPad M3 Lite 10",
"BAH-AL00": "Huawei M3 Lite",
"BAC-TL00": "Huawei nova 2 plus",
"BAC-L23": "Huawei nova 2 plus",
"BAC-L22": "Huawei nova 2 plus",
"BAC-L21": "Huawei nova 2 plus",
"BAC-L03": "Huawei nova 2 plus",
"BAC-AL00": "Huawei Nova 2 Plus",
"AUM-L41": "Huawei Honor 7C (Enjoy 8)",
"AUM-L29": "Huawei Honor 7A Pro",
"ATU-LX3": "Huawei Y6 2018",
"ATU-L42": "Huawei Y6 Prime 2018",
"ATU-L22": "Huawei Y6 2018",
"ATU-L21": "Huawei Y6 2018",
"ATU-L11": "Huawei Y6 2018",
"ATU-L03": "Huawei Y6 2018",
"ATU-AL10": "Huawei Enjoy 8e",
"Atomu-L21": "Huawei Y6 Prime 2018",
"Atomu-L03": "Huawei Honor 7A",
"Atomu-AL20IND": "Huawei Honor 7A",
"ATH-UL06": "Huawei ShotX",
"ATH-UL01": "Huawei ShotX",
"ATH-UL00": "Huawei Honor 7i",
"ATH-TL00": "Huawei Honor 7i",
"ATH-CL00": "Huawei Honor 7i",
"ATH-AL00": "Huawei Honor 7i",
"ASKH-TL00": "Honor Play 3",
"ASKH-AL00": "Honor Play 3",
"ARTH-TL00": "Huawei Enjoy 10",
"ARTH-L29N": "Huawei Y7p",
"ARTH-L29": "Huawei Y7p",
"ARTH-L28": "Huawei Y7p",
"ARTH-L09": "Huawei Enjoy 10",
"ARTH-L08": "Huawei Enjoy 10",
"ARTH-AL00M": "Huawei Enjoy 10",
"ARTH-AL00": "Huawei Enjoy 10",
"ARS-TL00": "Huawei Enjoy 9 Max",
"ARS-L22": "Huawei Y Max",
"Ares-L22HW": "Huawei Y Max",
"ARE-TL00": "Huawei Honor 8X Max",
"ARE-L22HN": "Huawei Honor 8X Max",
"AQM-TL00": "Huawei Enjoy 10s",
"AQM-L21A": "Huawei Y8P",
"AQM-L01": "Huawei Y8p",
"AQM-AL10HN": "Honor Play 4T Pro",
"AQM-AL00": "Huawei Enjoy 10s",
"ANG-AN00": "Huawei nova 8 5G",
"ANE-TL00": "Huawei P20 lite",
"ANE-LX3": "Huawei P20 Lite",
"ANE-LX2JOT": "Huawei P20 Lite",
"ANE-LX2J": "Huawei P20 Lite",
"ANE-LX2": "Huawei Nova 3e",
"ANE-LX1": "Huawei P20 Lite",
"ANE-LGRP1": "Huawei P20 Lite",
"ANE-L21": "Huawei P20 Lite",
"ANE-L12JPZ": "Huawei Nova 3e",
"ANE-L12": "Huawei Nova 3e",
"ANE-L03": "Huawei Nova 3e",
"ANE-L02J": "Huawei Nova 3e",
"ANE-L02": "Huawei Nova 3e",
"ANE-AL00I": "Huawei P20 Lite",
"ANE-AL00": "Huawei P20 Lite",
"ANA-TN00": "Huawei P40",
"ANA-N29": "Huawei P40",
"ANA-L04": "Huawei P40",
"ANA-AN00": "Huawei P40",
"ANA-AL00": "Huawei P40 4G",
"AMN-L29": "Huawei Y5 (2019)",
"AMN-L22": "Huawei Y5 (2019)",
"AMN-L09": "Huawei Y5 (2019)",
"ALP-TL00ZZB51": "Huawei Mate 10",
"ALP-TL00B": "Huawei Mate 10",
"ALP-TL00": "Huawei Mate 10",
"ALP-LGRP2": "Huawei Mate 10",
"ALP-LGRP1": "Huawei Mate 10",
"ALP-L29": "Huawei Mate 10",
"ALP-L09": "Huawei Mate 10",
"ALP-AL00ZZB54": "Huawei Mate 10",
"ALP-AL00ZZB02": "Huawei Mate 10",
"ALP-AL00": "Huawei Mate 10",
"ALE-TL00": "Huawei P8 Lite",
"ALE-L32": "Huawei P8 Lite",
"ALE-L23URY": "Huawei P8 Lite",
"ALE-L23": "Huawei P8 Lite",
"ALE-L21TUR": "Huawei P8 Lite",
"ALE-L21S": "Huawei P8 Lite",
"ALE-L21POL": "Huawei P8 Lite",
"ALE-L21MKD": "Huawei P8 Lite",
"ALE-L21HUN": "Huawei P8 Lite",
"ALE-L21HR": "Huawei P8 Lite",
"ALE-L21GR": "Huawei P8 Lite",
"ALE-L21FRA": "Huawei P8 Lite",
"ALE-L21DEU": "Huawei P8 Lite",
"ALE-L21AUT": "Huawei P8 Lite",
"ALE-L21": "Huawei P8 Lite",
"ALE-L03": "Huawei P8 Lite",
"ALE-L02": "Huawei P8 Lite",
"ALE-L01": "Huawei P8 Lite",
"ALE-CL00": "Huawei P8 Lite",
"AKA-L29": "Honor Play 4T",
"AKA-AL20": "Honor Play 4T",
"AKA-AL10": "Honor Play 4T",
"AGS3K-W10": "Huawei MatePad T 10s",
"AGS3K-W09": "Huawei MatePad T 10s",
"AGS3K-L09": "Huawei MatePad T 10s",
"AGS3-W09HN": "Huawei Enjoy Tablet 2",
"AGS3-W09": "Huawei MatePad T 10s",
"AGS3-W00E": "Huawei Enjoy Tablet 2",
"AGS3-W00D": "Huawei Enjoy Tablet 2",
"AGS3-W00B": "Huawei Enjoy Tablet 2",
"AGS3-L09": "Huawei MatePad T 10s",
"AGS3-AL09HN": "Huawei Enjoy Tablet 2",
"AGS3-AL00": "Huawei Enjoy Tablet 2",
"AGS2-W09HN": "Huawei MediaPad T5",
"AGS2-W09AUS": "Huawei MediaPad T5",
"AGS2-W09": "Huawei MediaPad T5",
"AGS2-L09": "Huawei MediaPad T5",
"AGS2-L03": "Huawei MediaPad T5",
"AGS2-AL00HN": "Huawei MediaPad T5",
"AGS2-AL00": "Honor Pad 5 10.1",
"AGS-W09": "Huawei MediaPad T3 10",
"AGS-L09": "Huawei MediaPad T3 10",
"AGRK-W09K": "Huawei MatePad T 10s",
"AGRK-W09": "Huawei AGRK-W09",
"AGRK-L09K": "Huawei MatePad T 10s",
"AGRK-L09": "Huawei MatePad T 10s",
"AGR-W09K": "Honor Pad X6",
"AGR-W09HN": "Huawei Enjoy Tablet 2",
"AGR-W09": "Honor Pad X6",
"AGR-L09": "Huawei MatePad T 10s",
"AGR-AL09HN": "Honor Pad X6",
"7D-504L": "Huawei MediaPad X1 7.0",
"7D-501u": "Huawei MediaPad X1 7.0",
"7D-501L": "Huawei MediaPad X1 7.0",
"704HW": "Huawei Nova Lite 2",
"608HW": "Huawei nova lite",
"NOP-AN01P": "Huawei Mate 40 Pro+",
"NOP-AN00P": "Huawei Mate 40 Pro+",
"NOP-AN00": "Huawei Mate 40 Pro+",
"NOH-N29": "Huawei Mate 40 Pro",
"NOH-AN01": "Huawei Mate 40 Pro",
"NOH-AN00": "Huawei Mate 40 Pro",
"NMO-L31": "Huawei GT3",
"NMO-L22": "Huawei GT3",
"NMO-L02": "Huawei NMO-L02",
"NICE-TL10": "Huawei Nice-TL10",
"NICE-AL10": "Huawei Nice-AL10",
"NICE-AL00": "Huawei Nice-AL00",
"NEO-L29": "Huawei Mate RS",
"NEN-L23CQ": "Huawei nova 8 5G",
"NEN-L22CQ": "Huawei nova 8 5G",
"NEN-L21CQ": "Huawei nova 8 5G",
"NEN-L03CQ": "Huawei nova 8 5G",
"NEN-L01CQ": "Huawei nova 8 5G",
"NEM-UL10": "Huawei Honor 5C",
"NEM-TL00": "Huawei Honor 5C",
"NEM-L51": "Huawei Honor 5C",
"NEM-L22": "Huawei Honor 5C",
"NEM-L21": "Huawei HONOR 7 Lite",
"NEM-AL10": "Huawei Honor 5C",
"MXWM-TN00": "Honor 30 Youth",
"MXWM-AN00": "Honor 30 Youth",
"MT7-UL00": "Huawei Ascend Mate 7",
"MT7-TL10": "Huawei Ascend Mate7",
"MT7-TL00": "Huawei Mate 7",
"MT7-L11": "Huawei Ascend Mate7",
"MT7-L09": "Huawei Ascend Mate7",
"MT7-J1": "Huawei Ascend Mate 7",
"MT7-CL00": "Huawei Ascend Mate 7",
"MT2-L05": "Huawei Ascend Mate2",
"MT1-U06": "Huawei Ascend Mate7",
"MT-L09": "Huawei Ascend Mate7",
"MRX-W39": "Huawei MatePad Pro",
"MRX-W29": "Huawei MatePad Pro",
"MRX-W19": "Huawei MatePad Pro",
"MRX-W09": "Huawei MatePad Pro",
"MRX-AN19": "Huawei MatePad Pro 5G",
"MRX-AL19": "Huawei MatePad Pro",
"MRX-AL09": "Huawei MatePad Pro",
"MRD-TL00": "Huawei Enjoy 9e",
"MRD-LX3": "Huawei Y6 2019",
"MRD-L41A": "Huawei Y6 (2019)",
"MRD-L41": "Huawei Y6 2019",
"MRD-L23": "Huawei Y6 2019",
"MRD-L22": "Huawei Y6 Pro (2019)",
"MRD-L21A": "Huawei Y6 Pro (2019)",
"MRD-L21": "Huawei Y6 2019",
"MRD-L11": "Huawei Y6 2019",
"MRD-L01": "Huawei Y6 2019",
"MRD-AL00": "Huawei Enjoy 9e",
"MOA-TL00": "Honor Play 9A",
"MOA-L49I": "Honor Play 9A",
"MOA-L49": "Honor 9A",
"MOA-AL20": "Honor Play 9A",
"MOA-AL00": "Honor Play 9A",
"MLA-UL00": "Huawei G9 Plus",
"MLA-TL10": "Huawei G9 Plus",
"MLA-TL00": "Huawei G9 Plus",
"MLA-L13": "Huawei nova plus",
"MLA-L12": "Huawei nova plus",
"MLA-L11": "Huawei nova plus",
"MLA-L03": "Huawei Nova plus",
"MLA-L02": "Huawei Nova Plus",
"MLA-L01": "Huawei Nova Plus",
"MLA-AL10": "Huawei Nova Plus",
"MLA-AL00": "Huawei Maimang 5",
"MHA-TL00": "Huawei Mate 9",
"MHA-L29": "Huawei Mate 9",
"MHA-L09": "Huawei Mate 9",
"MHA-AL00": "Huawei Mate 9 Pro",
"MED-TL00": "Huawei Enjoy 10",
"MED-L49": "Huawei Y6p",
"MED-L29II": "Honor 9A",
"MED-L29": "Honor 9A",
"MED-L09": "Huawei Y6p",
"MED-AL20": "Honor Play 9A",
"MED-AL10": "Honor Play 9A",
"MED-AL00": "Honor Play 9A",
"Maya-U29": "Huawei Honor Maya",
"Maya-TL10": "Huawei Honor Maya",
"Maya-L41": "Huawei Y6 2017",
"Maya-L13": "Huawei Honor Maya",
"Maya-L11": "Huawei Y6 2017",
"Maya-L03": "Huawei Maya L03",
"Maya-AL10": "Huawei Honor Maya",
"MAR-TL00": "Huawei nova 4e",
"MAR-L22BX": "Huawei P30 lite",
"MAR-L22B": "Huawei P30 lite",
"MAR-L22A": "Huawei P30 lite",
"MAR-L21MEB": "Huawei P30 lite",
"MAR-L21MEA": "Huawei P30 lite",
"MAR-L21H": "Huawei P30 lite",
"MAR-L21B": "Huawei P30 lite",
"MAR-L21A": "Huawei P30 lite",
"MAR-L03A": "Huawei P30 lite",
"MAR-L01MEB": "Huawei P30 lite",
# Redmi, Mi, and POCO devices
'2014215': 'Xiaomi Mi 4',
'2014712': 'Xiaomi Redmi Note',
'2014817': 'Xiaomi Redmi 2',
'2014818': 'Xiaomi Redmi 2',
'2015015': 'Xiaomi Mi 4i',
'2015051': 'Xiaomi Redmi Note 2',
'2015105': 'Xiaomi Mi 5',
'2015116': 'Xiaomi Redmi Note 3',
'2015161': 'Xiaomi Redmi Note 3',
'2015213': 'Xiaomi Mi Note 2',
'2015711': 'Xiaomi Mi 5s',
'2015816': 'Xiaomi Redmi 3',
'2016001': 'Xiaomi Mi Max',
'2016002': 'Xiaomi Mi Max',
'2016007': 'Xiaomi Mi Max',
'2016031': 'Xiaomi Redmi 3s',
'2016060': 'Xiaomi Redmi 4 (4X)',
'2016070': 'Xiaomi Mi 5s Plus',
'2016090': 'Xiaomi Redmi 4 (4X)',
'2016100': 'Xiaomi Redmi Note 4',
'2016117': 'Xiaomi Redmi 4A',
'AWM-A0': 'Xiaomi Black Shark Helo',
'DLT-A0': 'Xiaomi Black Shark 2 Pro',
'DLT-H0': 'Xiaomi Black Shark 2 Pro',
'M1803D5XA': 'Xiaomi Mi Mix 2S',
'M1803E1A': 'Xiaomi Mi 8',
'M1803E6G': 'Xiaomi Redmi S2 (Redmi Y2)',
'M1803E6H': 'Xiaomi Redmi S2 (Redmi Y2)',
'M1803E6I': 'Xiaomi Redmi S2 (Redmi Y2)',
'M1803E7SG': 'Xiaomi Redmi Note 5 AI Dual Camera',
'M1803E7SH': 'Xiaomi Redmi Note 5 AI Dual Camera',
'M1804C3CG': 'Xiaomi Redmi 6A',
'M1804C3CH': 'Xiaomi Redmi 6A',
'M1804C3CI': 'Xiaomi Redmi 6A',
'M1804C3DG': 'Xiaomi Redmi 6',
'M1804C3DH': 'Xiaomi Redmi 6',
'M1804C3DI': 'Xiaomi Redmi 6',
'M1804D2SG': 'Xiaomi Mi A2 (Mi 6X)',
'M1804D2SI': 'Xiaomi Mi A2 (Mi 6X)',
'M1804E4A': 'Xiaomi Mi Max 3',
'M1805D1SG': 'Xiaomi Mi A2 Lite (Redmi 6 Pro)',
'M1805E10A': 'Xiaomi Pocophone F1',
'M1806E7TG': 'Xiaomi Redmi Note 6 Pro',
'M1806E7TH': 'Xiaomi Redmi Note 6 Pro',
'M1806E7TI': 'Xiaomi Redmi Note 6 Pro',
'M1807E8A': 'Xiaomi Mi 8 Pro',
'M1808D2TG': 'Xiaomi Mi 8 Lite',
'M1810F6LG': 'Xiaomi Redmi 7',
'M1810F6LH': 'Xiaomi Redmi 7',
'M1810F6LI': 'Xiaomi Redmi 7',
'M1901F71': 'Xiaomi Redmi Note 7S',
'M1901F7G': 'Xiaomi Redmi Note 7',
'M1901F7H': 'Xiaomi Redmi Note 7',
'M1901F7I': 'Xiaomi Redmi Note 7',
'M1901F7S': 'Xiaomi Redmi Note 7 Pro',
'M1901F9E': 'Xiaomi Mi Play',
'M1902F1G': 'Xiaomi Mi 9',
'M1903C3EG': 'Xiaomi Redmi 7A',
'M1903C3EH': 'Xiaomi Redmi 7A',
'M1903C3EI': 'Xiaomi Redmi 7A',
'M1903C3GG': 'Xiaomi Redmi Go',
'M1903C3GH': 'Xiaomi Redmi Go',
'M1903C3GI': 'Xiaomi Redmi Go',
'M1903F10G': 'Xiaomi Mi 9T',
'M1903F10I': 'Xiaomi Redmi K20',
'M1903F11G': 'Xiaomi Mi 9T Pro',
'M1903F2G': 'Xiaomi Mi 9 SE',
'M1904F3BG': 'Xiaomi Mi 9 Lite',
'M1906F9SH': 'Xiaomi Mi A3',
'M1906F9SI': 'Xiaomi Mi A3',
'M1906G7G': 'Xiaomi Redmi Note 8 Pro',
'M1906G7I': 'Xiaomi Redmi Note 8 Pro',
'M1908C3JG': 'Xiaomi Redmi Note 8',
'M1908C3JH': 'Xiaomi Redmi Note 8',
'M1908C3JI': 'Xiaomi Redmi Note 8',
'M1908C3KG': 'Xiaomi Redmi 8A',
'M1908C3KH': 'Xiaomi Redmi 8A',
'M1908C3XG': 'Xiaomi Redmi Note 8T',
'M1910F4E': 'Xiaomi Mi CC9 Pro',
'M1910F4G': 'Xiaomi Mi Note 10 Lite',
'M1910F4S': 'Xiaomi Mi Note 10 Pro',
'M1912G7BC': 'Xiaomi Redmi K30',
'M1912G7BE': 'Xiaomi Redmi K30',
'M2001C3K3I': 'Xiaomi Redmi 8A Dual',
'M2001J1G': 'Xiaomi Mi 10 Pro 5G',
'M2001J2G': 'Xiaomi Mi 10 5G',
'M2001J2I': 'Xiaomi Mi 10 5G',
'M2002F4LG': 'Xiaomi Mi Note 10 Lite',
'M2002J9E': 'Xiaomi Mi 10 Youth 5G',
'M2002J9G': 'Xiaomi Mi 10 Lite 5G',
'M2003J15SC': 'Xiaomi Redmi 10X 4G',
'M2003J15SG': 'Xiaomi Redmi Note 9',
'M2003J15SS': 'Xiaomi Redmi Note 9',
'M2003J6A1G': 'Xiaomi Redmi Note 9S',
'M2003J6A1I': 'Xiaomi Redmi Note 9 Pro (India)',
'M2003J6B1I': 'Xiaomi Redmi Note 9 Pro Max',
'M2004C3MI': 'Xiaomi Redmi 9 (India)',
'M2004J11G': 'Xiaomi Poco F2 Pro',
'M2004J19C': 'Xiaomi Redmi 9',
'M2004J19G': 'Xiaomi Redmi 9',
'M2010J19SI': 'Xiaomi Redmi 9 Power',
'M2004J19PI': 'Xiaomi Poco M2',
'M2004J7AC': 'Xiaomi Redmi 10X 5G',
'M2101K6I': 'Xiaomi Redmi Note 10 Pro Max',
'M2103K19G': 'Xiaomi Redmi Note 10 5G',
'M2004J7BC': 'Xiaomi Redmi 10X Pro 5G',
'M2006C3LC': 'Xiaomi Redmi 9A',
'M2006C3LG': 'Xiaomi Redmi 9A',
'M2006C3LI': 'Xiaomi Redmi 9A',
'M2006C3LII': 'Xiaomi Redmi 9i',
'M2006C3LVG': 'Xiaomi Redmi 9AT',
'M2006C3MG': 'Xiaomi Redmi 9C',
'M2006C3MII': 'Xiaomi Redmi 9 (India)',
'M2006C3MNG': 'Xiaomi Redmi 9C NFC',
'M2006J10C': 'Xiaomi Redmi K30 Ultra',
'M2007J17C': 'Xiaomi Redmi Note 9 Pro 5G',
'M2007J17G': 'Xiaomi Mi 10T Lite 5G',
'M2007J17I': 'Xiaomi Mi 10i',
'M2007J1SC': 'Xiaomi Mi 10 Ultra',
'M2007J20CG': 'Xiaomi Poco X3 NFC',
'M2007J20CI': 'Xiaomi Poco X3',
'M2007J20CT': 'Xiaomi Poco X3 NFC',
'M2007J22C': 'Xiaomi Redmi Note 9 5G',
'M2007J3SC': 'Xiaomi Redmi K30S',
'M2007J3SG': 'Xiaomi Mi 10T Pro 5G',
'M2007J3SY': 'Xiaomi Mi 10T 5G',
'M2010J19CG': 'Xiaomi Poco M3',
'M2010J19CI': 'Xiaomi Poco M3',
'MAE136': 'Xiaomi Redmi 4 (4X)',
'MAG138': 'Xiaomi Redmi 4 (4X)',
'MCE16': 'Xiaomi Mi 6',
'MCE8': 'Xiaomi Mi Note 3',
'MCG3B': 'Xiaomi Redmi 5A',
'MCI3B': 'Xiaomi Redmi 5A',
'MDE40': 'Xiaomi Mi Max 2',
'MDE5': 'Xiaomi Mi Mix 2',
'MDG1': 'Xiaomi Redmi 5',
'MDG2': 'Xiaomi Mi A1 (Mi 5X)',
'MDI1': 'Xiaomi Redmi 5',
'MDI2': 'Xiaomi Mi A1 (Mi 5X)',
'MDI40': 'Xiaomi Mi Max 2',
'MDI6': 'Xiaomi Redmi Y1 Lite',
'MDI6S': 'Xiaomi Redmi Y1 (Note 5A)',
'MEG7': 'Xiaomi Redmi 5 Plus (Redmi Note 5)',
'MEI7': 'Xiaomi Redmi Note 5 Pro',
'MEI7S': 'Xiaomi Redmi Note 5 Pro',
'MZB07QAIN': 'Xiaomi Poco C3',
'MZB07RHIN': 'Xiaomi Poco C3',
'MZB07RIIN': 'Xiaomi Poco C3',
'MZB07RJIN': 'Xiaomi Poco C3',
'MZB07RKIN': 'Xiaomi Poco C3',
'MZB07RLIN': 'Xiaomi Poco C3',
'MZB07Z0IN': 'Xiaomi Poco X3',
'MZB07Z1IN': 'Xiaomi Poco X3',
'MZB07Z2IN': 'Xiaomi Poco X3',
'MZB07Z3IN': 'Xiaomi Poco X3',
'MZB07Z4IN': 'Xiaomi Poco X3',
'MZB7995IN': 'Xiaomi Redmi 7A',
'MZB8458IN': 'Xiaomi Redmi 8A',
'MZB8741IN': 'Xiaomi Poco X2',
'MZB8742IN': 'Xiaomi Poco X2',
'MZB8743IN': 'Xiaomi Poco X2',
'MZB8744IN': 'Xiaomi Poco X2',
'MZB8745IN': 'Xiaomi Poco X2',
'MZB8746IN': 'Xiaomi Poco X2',
'MZB9011IN': 'Xiaomi Poco X2',
'MZB9012IN': 'Xiaomi Poco X2',
'MZB9013IN': 'Xiaomi Poco X2',
'MZB9919IN': 'Xiaomi Poco M2',
'MZB9965IN': 'Xiaomi Poco X3',
'SHARK MBU-A0': 'Xiaomi Black Shark 3 Pro',
'SHARK MBU-H0': 'Xiaomi Black Shark 3 Pro',
'SKW-A0': 'Xiaomi Black Shark 2',
'SKW-H0': 'Xiaomi Black Shark 2',
# Realme devices
"RMX1931": "Realme X",
"RMX1901": "Realme X",
"RMX1941": "Realme C2",
"RMX2156": "Realme Narzo 30",
"RMX3360": "Realme GT Master Edition",
"RMX1851": "Realme 3 Pro",
"RMX2030": "Realme 5i",
# BlackBerry devices
"BBG100-1": "BlackBerry Evolve",
# Asus
"ASUS_X00TD": "ASUS Zenfone Max Pro M1",
"ASUS_Z017DB": "ASUS Zenfone 3",
"ASUS_X00HD": "Asus Zenfone 4 Max",
"ASUS_X00TDA": "ASUS Zenfone Max Pro M1",
"ASUS_I01WD": "Asus Zenfone 6",
"ASUS_Z01RD": "Asus Zenfone 5Z",
"ZS630KL": "Asus Zenfone 6",
"I01WD": "Asus Zenfone 6",
# Others
"V2037": "vivo Y20G",
"I2012": "vivo"
}
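

# --- Usage sketch (illustrative addition, not part of the original data) ---
# The map above keys on bare model codes, but model strings reported by real
# devices often carry regional or firmware suffixes, so an exact lookup with a
# longest-prefix fallback is a reasonable way to resolve them. Note that
# `resolve_device_name` is a hypothetical helper name introduced here for
# illustration only; it is not defined elsewhere in this module.
def resolve_device_name(model_code, default="Unknown Device"):
    # Exact match first: cheapest and unambiguous.
    name = MOBILE_DEVICE_CODE_NAME.get(model_code)
    if name:
        return name
    # Fall back to the longest key that prefixes the reported model string,
    # so e.g. "ELS-AN00B" would still resolve via the "ELS-AN00" entry.
    best_len = 0
    for code, device in MOBILE_DEVICE_CODE_NAME.items():
        if model_code.startswith(code) and len(code) > best_len:
            best_len, name = len(code), device
    # A few entries carry empty values (unidentified variants), so fall
    # through to the default for those as well.
    return name or default

# e.g. resolve_device_name("ELS-AN00") -> "Huawei P40 Pro"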
"T1-A22L": "Huawei Mediapad T1",
"T1-A21w": "Huawei MediaPad T1 10",
"T1-A21L": "Huawei MediaPad T1 10",
"T1-821L": "Huawei MediaPad T1 8.0",
"T1-702u": "Huawei MediaPad T1 7.0",
"T1-702": "Huawei MediaPad T1 7.0",
"T1-701w": "Huawei MediaPad T1 7.0",
"T1-701ua": "Huawei MediaPad T1 7.0",
"T1-701u": "Huawei MediaPad T1 7.0",
"T1-701": "Huawei MediaPad T1 7.0",
"STK-TL00": "Huawei Enjoy 10 Plus",
"STK-L23BHN": "Huawei Y9 Prime (2019)",
"STK-L22HN": "Huawei Y9 Prime (2019)",
"STK-L22DV": "Huawei Y9 Prime (2019)",
"STK-L22": "Huawei Y9 Prime (2019)",
"STK-L21VHN": "Huawei Y9 Prime (2019)",
"STK-L21UDV": "Huawei Y9 Prime (2019)",
"STK-L21MDV": "Huawei Y9 Prime (2019)",
"STK-L21M": "Huawei Y9 Prime (2019)",
"STK-L21HN": "Huawei Y9 Prime (2019)",
"STK-L21": "Huawei Y9 Prime (2019)",
"STK-L03DV": "Huawei P Smart Z",
"STK-L03B": "Huawei P Smart Z",
"STK-L01MDV": "Huawei Y9 Prime (2019)",
"STK-L01M": "Huawei P Smart Z",
"STK-AL00": "Huawei P Smart Z",
"STF-TL10": "Huawei Honor 9",
"STF-L09S": "Huawei Honor 9",
"STF-L09": "Huawei Honor 9",
"STF-AL10": "Huawei Honor 9",
"STF-AL00": "Huawei Honor 9",
"SPN-AL10": "Huawei nova 5z",
"SPN-AL00": "Huawei nova 5z",
"Sophia-L12": "Huawei Ascend P7",
"Sophia-L11": "Huawei Ascend P7",
"Sophia-L10": "Huawei Ascend P7",
"Sophia-L09": "Huawei Ascend P7",
"Sophia-L07": "Huawei Ascend P7",
"Sophia-L00": "Huawei Ascend P7",
"SNE-L01": "Huawei Mate 20 lite",
"SLA-L22": "Huawei P9 Lite Mini",
"SLA-L03": "Huawei Y6 Pro 2017",
"SLA-L02": "Huawei Y6 Pro 2017",
"SLA-AL00": "Huawei Enjoy 7",
"SHT-W09": "Huawei MediaPad M5 8.4",
"SHT-AL09": "Huawei MediaPad M5 8.4",
"Selina-L03": "Huawei Y6 Pro 2017",
"SEA-AL10": "Huawei nova 5 Pro",
"SEA-AL00": "Huawei nova 5",
"SCMR-W09": "Huawei MatePad 10.8",
"SCMR-AL09": "Huawei MatePad 10.8",
"SCL-U31": "Huawei Y6",
"SCL-U23": "Huawei Y6",
"SCL-L32": "Huawei Y6",
"SCL-L21": "Huawei Y6",
"SCL-L04": "Huawei Y6",
"SCL-L03": "Huawei Y6",
"SCL-L02": "Huawei Y6",
"SCL-L01": "Huawei Y6",
"SCL-CL00": "Huawei Honor 4A",
"SCL-AL00": "Huawei Honor 4A",
"SCC-U21": "Huawei Y6",
"SC-CL00": "Huawei Ascend GX1",
"S8-701w": "Huawei MediaPad T1 8.0",
"S8-701u": "Huawei MediaPad M1 8.0",
"S8-306L": "Huawei MediaPad M1 8.0",
"S8-303L": "Huawei MediaPad M1",
"S8-301w": "Huawei MediaPad M1 8.0",
"S8-301u": "Huawei MediaPad M1 8.0",
"S8-301L": "Huawei MediaPad M1 8.0",
"S7-931w": "Huawei MediaPad 7 Lite",
"S7-931u": "Huawei MediaPad 7 Lite",
"S7-722u": "Huawei MediaPad 7 Youth 2",
"S7-721w": "Huawei MediaPad 7 Youth 2",
"S7-721u": "Huawei MediaPad 7 Youth 2",
"S7-721g": "Huawei MediaPad 7 Youth 2",
"S7-701w": "Huawei MediaPad 7 Youth",
"S7-701u": "Huawei MediaPad 7 Youth",
"S7-601w": "Huawei MediaPad 7 Vogue",
"S7-601u": "Huawei MediaPad 7 Vogue",
"S7-301u": "Huawei MediaPad 7 Vogue",
"S7-201u": "Huawei IDEOS S7 Slim",
"S10-232L": "Huawei MediaPad 10 Link Plus",
"S10-231w": "Huawei MediaPad10 Link",
"S10-231u": "Huawei MediaPad 10 Link Plus",
"S10-231L": "Huawei MediaPad 10 Link",
"S10-201u": "Huawei MediaPad 10 Link",
"S10-101w": "Huawei MediaPad 10 FHD",
"S10-101u": "Huawei MediaPad 10 FHD",
"RVL-AL09": "Honor Note 10",
"RNE-L23": "Huawei Mate 10 Lite",
"RNE-L22": "Huawei Nova 2i",
"RNE-L21": "Huawei Mate 10 Lite",
"RNE-L03": "Huawei Mate 10 Lite",
"RNE-L02": "Huawei Nova 2i",
"RNE-L01": "Huawei Mate 10 Lite",
"RNE-AL00": "Huawei Maimang 6",
"RIO-UL00": "Huawei G7 Plus",
"RIO-TL00": "Huawei G7 Plus",
"RIO-L33": "Huawei G8",
"RIO-L23": "Huawei G8",
"RIO-L11": "Huawei G8",
"RIO-L03": "Huawei GX8",
"RIO-L02": "Huawei G8",
"RIO-L01,RIO-L11": "Huawei G8",
"RIO-L01": "Huawei G8",
"RIO-CL00": "Huawei Maimang 4",
"RIO-AL00": "Huawei Maimang 4",
"Rhone-L21": "Huawei Mate 10 Lite",
"Rhone-L03": "Huawei Mate 10 Lite",
"Rhone-L01": "Huawei Mate 10 Lite",
"Prague-TL00": "Huawei Prague-TL00",
"Prague-L23": "Huawei P8 Lite 2017",
"Prague-L22": "Huawei P8 Lite 2017",
"Prague-L21": "Huawei P8 Lite 2017",
"Prague-L03": "Huawei Prague-L03",
"PRA-TL10": "Huawei Honor 8 Lite",
"PRA-LX3": "Huawei P9 Lite 2017",
"PRA-LX2": "Huawei Nova Lite",
"PRA-LX1": "Huawei P8 Lite 2017",
"PRA-LA1": "Huawei Honor 8 Lite",
"PRA-L31": "Huawei P8 Lite 2017",
"PRA-L22": "Huawei P8 Lite 2017",
"PRA-L21": "Huawei P8 Lite Dual Sim 2017",
"PRA-L11": "Huawei P8 Lite 2017",
"PRA-L03": "Huawei P8 Lite 2017",
"PRA-L02": "Huawei Nova Lite",
"PRA-L01": "Huawei P8 Lite 2017",
"PRA-AL00X": "Huawei Honor 8 Lite",
"PRA-AL00": "Huawei Honor 8 Lite",
"PPAH-TL20": "Huawei P smart 2021",
"PPAH-L22": "Huawei P smart 2021",
"PPAH-L21": "Huawei P smart 2021",
"PPAH-L02": "Huawei P smart 2021",
"PPAH-AL40": "Huawei P smart 2021",
"PPAH-AL20": "Huawei P smart 2021",
"POT-TL00": "Huawei Enjoy 9s",
"POT-LX3": "Huawei P Smart 2019",
"POT-LX2J": "Huawei Nova Lite 3",
"POT-LX1A": "Huawei P Smart 2020",
"POT-LX1": "Huawei P Smart 2019",
"POT-L41B": "Huawei P Smart S",
"POT-L21RU": "Huawei P smart 2019",
"POT-L21": "Huawei P smart 2019",
"POT-L01": "Huawei P smart 2019",
"POT-AL10C": "Huawei enjoy 9s",
"POT-AL00a": "Huawei Enjoy 9S",
"POT-AL00": "Huawei P Smart 2019",
"PLK-UL00IN": "Huawei Honor 7",
"PLK-UL00": "Huawei Honor 7",
"PLK-TL01H": "Huawei Honor 7",
"PLK-TL00": "Huawei Honor 7",
"PLK-L01": "Huawei Honor 7",
"PLK-CL00": "Huawei Honor 7",
"PLK-AL10": "Huawei Honor 7",
"PLE-703L": "Huawei MediaPad M2 Lite",
"PLE-701L": "Huawei MediaPad T2 7.0",
"Pine-UL00": "Huawei Honor 6 plus",
"Pine-TL10": "Huawei Honor 6 Plus",
"Pine-L04": "Huawei Honor 6 Plus",
"Pine-L02": "Huawei Honor 6 Plus",
"Pine-L00": "Huawei Honor 6 Plus",
"PIC-TL00": "Huawei Nova 2",
"PIC-LX9": "Huawei Nova 2",
"PIC-L29": "Huawei Nova 2",
"PIC-L09": "Huawei Nova 2",
"PIC-AL00": "Huawei Nova 2",
"PE-UL00": "Huawei Honor 6 Plus",
"PE-TL20": "Huawei Honor 6 Plus",
"PE-TL10": "Huawei Honor 6 Plus",
"PE-TL00M": "Huawei Honor 6 Plus",
"PE-CL00": "Huawei Honor 6 Plus",
"PCT-TL10": "Honor View 20",
"PCT-L29D": "Honor View 20",
"PCT-L29": "Honor View 20",
"PCT-AL10D": "Honor View 20",
"PCT-AL10": "Honor View 20",
"Paris-L09A": "Huawei Nova 3",
"PAR-TL20": "Huawei Nova 3",
"PAR-TL00": "Huawei nova 3",
"PAR-AL00": "Huawei nova 3",
"P7-L10": "Huawei Ascend P7",
"P7-L09": "Huawei Ascend P7",
"P7-L07": "Huawei Ascend P7",
"P7-L05": "Huawei Ascend P7",
"P6-U06": "Huawei Ascend P6",
"P6-S-U00": "Huawei Ascend P6 S",
"P2-6011": "Huawei Ascend P2",
"OXF-AN10L": "Honor V30 Pro",
"OXF-AN10": "Honor V30 Pro",
"OXF-AN00L": "Honor V30",
"OXF-AN00": "Honor V30",
"OCE-AN50": "Huawei Mate 40E",
"OCE-AN10": "Huawei Mate 40E",
"NXT-TL00": "Huawei Mate 8",
"NXT-L29B": "Huawei Ascend Mate8",
"NXT-L29A": "Huawei Ascend Mate8",
"NXT-L29": "Huawei Mate 8",
"NXT-L09A": "Huawei Ascend Mate8",
"NXT-L09": "Huawei Mate 8",
"NXT-DL00": "Huawei Mate 8",
"NXT-CL00": "Huawei Mate 8",
"NXT-C00": "Huawei Mate 8",
"NXT-AL10": "Huawei Mate 8",
"MAR-L01MEA": "Huawei P30 lite",
"MAR-L01B": "Huawei P30 lite",
"MAR-L01A": "Huawei P30 lite",
"MAR-AL00": "Huawei nova 4e",
"Madrid-L21": "Huawei Y6 (2019)",
"M2-A01w": "Huawei MediaPad M2 10.0",
"M2-A01L": "Huawei MediaPad M2 10.0",
"M2-803L": "Huawei MediaPad M2 8.0",
"M2-802L": "Huawei MediaPad M2 8.0",
"M2-801w": "Huawei MediaPad M2 8.0",
"M2-801L": "Huawei MediaPad M2 8.0",
"LYO-L21": "Huawei Honor 5A",
"LYO-L02": "Huawei Y6 II",
"LYO-L01": "Huawei Y6 II",
"LYA-TL00L": "Huawei Mate 20 Pro",
"LYA-TL00": "Huawei Mate 20 Pro",
"LYA-L29": "Huawei Mate 20 Pro",
"LYA-L09": "Huawei Mate 20 Pro",
"LYA-AL10": "Huawei Mate 20 Pro",
"LYA-AL00P": "Huawei Mate 20 Pro",
"LYA-AL00L": "Huawei Mate 20 Pro",
"LYA-AL00": "Huawei Mate 20 Pro",
"LUA-U23": "Huawei Y3 II",
"LUA-U22": "Huawei Y3 II",
"LUA-U03": "Huawei Y3 II",
"LUA-U02": "Huawei Y3 II",
"LUA-L23": "Huawei Y3 II",
"LUA-L22HN": "Huawei Honor Bee 2",
"LUA-L22": "Huawei Y3 II",
"LUA-L21": "Huawei Y3 II",
"LUA-L13": "Huawei Y3 II",
"LUA-L03": "Huawei Y3 II",
"LUA-L02": "Huawei Y3 II",
"LUA-L01": "Huawei Y3 II",
"LRA-L21B": "Honor 30i",
"LRA-AL00": "Honor 20 lite (China)",
"LON-L29D": "Huawei Mate 9 Pro",
"LON-L29C": "Huawei Mate 9 Pro",
"LON-L29": "Huawei Mate 9 Pro",
"LON-AL00": "Huawei Mate 9 Pro",
"LLD-TL10": "Honor 9 Lite",
"LLD-L31": "Huawei Honor 9 Lite",
"LLD-L21": "Huawei Honor 9 Lite",
"LLD-AL30": "Honor 9N (9i)",
"LLD-AL20": "Honor 9N (9i)",
"LLD-AL10": "Huawei Honor 9 Lite",
"LLD-AL00": "Huawei Honor 9 Lite",
"LIO-TL00": "Huawei Mate 30 Pro",
"LIO-N29": "Huawei Mate 30 RS Porsche Design",
"LIO-L29": "Huawei Mate 30 Pro 5G",
"LIO-AN00P": "Huawei Mate 30 RS Porsche Design",
"LIO-AN00M": "Huawei Mate 30 Pro",
"LIO-AN00": "Huawei Mate 30 Pro 5G",
"LIO-AL00": "Huawei Mate 30 Pro",
"LDN-TL10": "Huawei Y7 Prime 2018",
"LDN-TL00": "Huawei Enjoy 8",
"LDN-LX3": "Huawei Y7 2018",
"LDN-LX2": "Huawei Y7 Prime 2018",
"LDN-L22": "Huawei nova 2 lite",
"LDN-L21": "Huawei Y7 2018",
"LDN-L03": "Huawei Y7 2018",
"LDN-L01": "Huawei Y7 2018",
"LDN-AL00": "Huawei Enjoy 8",
"KSA-L29": "Honor 8S",
"KSA-L22": "Honor 8S",
"KSA-L09": "Honor 8S",
"KSA-AL10": "Honor 8S",
"KSA-AL00": "Honor 8S",
"KRJ-W09": "Honor V6",
"KRJ-AN00": "Honor V6",
"KOB2-W09B": "Huawei MatePad T8",
"KOB2-W09": "Huawei MatePad T8",
"KOB2-L09B": "Huawei MatePad T8",
"KOB2-L09": "Huawei MatePad T8",
"KOB-W09": "HUAWEI MediaPad T3",
"KOB-L09": "Huawei Mediapad T3",
"KNT-UL10": "Huawei Honor V8",
"KNT-TL10": "Huawei Honor V8",
"KNT-C00": "Huawei Honor V8",
"KNT-AL20": "Huawei Honor V8",
"KNT-AL10": "Huawei Honor V8",
"KKG-TN00": "Honor X10 Max 5G",
"KKG-AN00": "Honor X10 Max 5G",
"KIW-UL00": "Huawei Honor 5X",
"KIW-TL00": "Huawei Honor 5X",
"KIW-L24": "Huawei Honor 5X",
"KIW-L22": "Huawei Honor 5X",
"KIW-L21": "Huawei HONOR 5X",
"KIW-CL00": "Huawei Honor 5X",
"KIW-AL10": "Huawei Honor 5X",
"KII-L33": "Huawei GR5",
"KII-L23": "Huawei GR5",
"KII-L22": "Huawei GR5",
"KII-L21": "Huawei GR5",
"KII-L05": "Huawei GR5",
"JSN-TL00": "Honor 8X",
"JSN-L22X": "Honor 8X",
"JSN-L21X": "Honor 8X",
"JSN-L21": "Honor 8X",
"JSN-AL00": "Honor 8X",
"JSC-AN00A": "Huawei nova 8 SE",
"JSC-AN00": "Huawei nova 8 SE",
"JNY-L22": "Huawei nova 7i",
"JNY-L21": "Huawei nova 7i",
"JNY-L01": "Huawei nova 7i",
"JNY-AL10": "Huawei nova 6 SE",
"JMM-TL00": "Huawei Honor 6C Pro",
"JMM-L22": "Huawei Honor 6C Pro",
"JMM-AL00": "Huawei Honor 6C Pro",
"JKM-TL00": "Huawei Y9 (2019)",
"JKM-LX3": "Huawei Y9 (2019)",
"JKM-LX2": "Huawei Y9 (2019)",
"JKM-LX1": "Huawei Y9 (2019)",
"JKM-L21X": "Huawei Y9 (2019)",
"JKM-L21": "Huawei Y9 (2019)",
"JKM-L01X": "Huawei Y9 (2019)",
"JKM-AL20": "Huawei Y9 (2019)",
"JKM-AL10": "Huawei Y9 (2019)",
"JKM-AL00": "Huawei Y9 (2019)",
"Jimmy-TL00": "Huawei Jimmy TL00",
"Jimmy-AL00": "Huawei Jimmy-AL00",
"JER-TN20": "Huawei nova 7 Pro 5G",
"JER-TN10": "Huawei nova 7 Pro 5G",
"JER-AN20": "Huawei nova 7 Pro 5G",
"JER-AN10": "Huawei Nova 7 Pro",
"JEF-TN20": "Huawei nova 7 5G",
"JEF-TN00": "Huawei nova 7 5G",
"JEF-AN20": "Huawei nova 7 5G",
"JEF-AN00": "Huawei Nova 7 Pro",
"JDN2-W09HN": "Honor Tab 5",
"JDN2-W09": "Honor Tab 5",
"JDN2-L09": "Huawei MediaPad M5 Lite 8",
"JDN2-AL50HN": "Huawei MediaPad M5 lite",
"JDN2-AL50": "Huawei MediaPad M5 lite",
"JDN2-AL00HN": "Honor Pad 5 8",
"JDN2-AL00": "Honor Pad 5 8",
"JDN-W09": "Huawei Honor Pad 2",
"JDN-L01": "Huawei MediaPad T2 8.0",
"JDN-AL00": "Huawei Honor Pad 2",
"Jazz-TL10": "Huawei Ascend Mate 7",
"Jazz-L11": "Huawei Ascend Mate 7",
"Jazz-L09": "Huawei Ascend Mate 7",
"Jazz-J1": "Huawei Ascend Mate 7",
"JAT-TL00": "Huawei Honor 8A",
"JAT-L41HW": "Honor 8A Pro",
"JAT-L41": "Honor 8A Pro",
"JAT-L29HW": "Honor Play 8A",
"JAT-L29": "Honor Play 8A",
"JAT-L23HW": "Honor Play 8A",
"JAT-L21AHW": "Honor 8A Pro",
"JAT-AL00": "Honor Play 8A",
"Jakarta-LGRP2": "Huawei Y9 (2019)",
"Jackman-L22": "Huawei Y9 (2019)",
"INE-TL00": "Huawei Nova 3i",
"INE-LX2": "Huawei Nova 3i",
"INE-LX1": "Huawei Nova 3i",
"INE-LGRP1": "Huawei Nova 3i",
"INE-L22rr": "Huawei Nova 3i",
"INE-L22": "Huawei Nova 3i",
"INE-L21": "Huawei Nova 3i",
"INE-AL00": "Huawei nova 3i",
"HWI-TL00": "Huawei Nova 2S",
"HWI-LGRP1": "Huawei Nova 2S",
"HWI-AL00": "Huawei Nova 2s",
"HRY-TL00": "Honor 10 Lite",
"HRY-L21T": "Honor 10 Lite",
"HRY-L21D": "Honor 10 Lite",
"HRY-L21": "Honor 10 Lite",
"HRY-LX1": "Honor 10 Lite",
"HRY-LX2": "Honor 10 Lite",
"HRY-AL00a": "Honor 10 Lite",
"HRY-LX1MEB": "Honor 10 Lite",
"HRY-AL00TA": "Honor 20i",
"HRY-AL00T": "Honor 10 Lite",
"HRY-AL00A": "Honor 10 Lite",
"HRY-AL00": "Honor 10 Lite",
"Holly-U19": "Huawei Holly",
"Holly-U10": "Huawei Holly",
"Holly-U00": "Huawei Honor 3C",
"HMA-TL00": "Huawei Mate 20",
"HMA-L29": "Huawei Mate 20",
"HMA-L09": "Huawei Mate 20",
"HMA-AL00": "Huawei Mate 20",
"HLK-L42": "Honor 9X Pro",
"HLK-L41": "Honor 9X Pro",
"HLK-AL10": "Honor 9X",
"HLK-AL00A": "Honor 9X",
"HLK-AL00": "Honor 9X (China)",
"HDN-W09": "Huawei Honor",
"H60-L12": "Huawei Honor 6",
"H60-L04": "Huawei Honor 6",
"H60-L03": "Huawei Honor 6",
"H60-L02": "Huawei Honor 6",
"H60-L01": "Huawei Honor 6",
"H60-J1": "Huawei Honor 6",
"H30-U10": "Huawei 3C",
"H30-L02": "Huawei Honor 3C",
"H30-L01": "Huawei Honor 3C",
"GRA-UL10": "Huawei P8",
"GRA-UL00": "Huawei P8",
"GRA-TL00": "Huawei P8",
"GRA-L13": "Huawei P8",
"GRA-L09": "Huawei P8",
"GRA-L03": "Huawei P8",
"GRA-CL10": "Huawei P8",
"GRA-CL00": "Huawei P8 Standard Edition",
"GLK-TL00": "Huawei nova 5i",
"GLK-AL00": "Huawei nova 5i",
"GEM-703L": "HUAWEI Honor X2",
"GEM-703": "Huawei MediaPad X2",
"GEM-702L": "Huawei MediaPad X2",
"GEM-702": "Huawei MediaPad X2",
"GEM-701L": "Huawei MediaPad X2",
"GEM-701": "Huawei MediaPad X2",
"G760-TL00": "Huawei Ascend G7",
"G760-L03": "Huawei Ascend G7",
"G760-L01": "Huawei Ascend G7",
"G750-U10": "Huawei Honor 3X",
"G750-T20": "Huawei Honor 3X",
"G750-C00": "Huawei Honor 3X",
"G740-L00": "Huawei G740",
"G735-L23": "Huawei G Play",
"G735-L12": "Huawei G Play",
"G735-L03": "Huawei G Play",
"G730-U251": "Huawei G730",
"G730-U10": "Huawei G730",
"G700-U20": "Huawei Ascend G700",
"G700-U10": "Huawei Ascend G700",
"G7-L11": "Huawei Ascend G7",
"G7-L01": "Huawei Ascend G7",
"G630-U251": "Huawei G630",
"G630-U20": "Huawei G630",
"G630-U10": "Huawei G630",
"G630-U00": "Huawei G630",
"G629-UL00": "Huawei G629",
"G628-TL00": "Huawei Ascend G628",
"G620S-UL00": "Huawei Ascend G620s",
"G620S-L03": "Huawei Ascend G620s",
"G620S-L02": "Huawei Ascend G620s",
"G620S-L01": "Huawei Ascend G620s",
"G620-UL01": "Huawei G620",
"G620-L75": "Huawei Ascend G620s",
"G620-L72": "Huawei G620",
"G615-U10": "Huawei Ascend G615",
"G610-U20": "Huawei G610",
"G610-U15": "Huawei G610",
"G610-U00": "Huawei Ascend G6",
"G6-U251": "Huawei Ascend G6",
"G6-U10": "Huawei Ascend G6",
"G6-L33": "Huawei Ascend G6",
"G6-L22": "Huawei Ascend G6",
"G6-L11": "Huawei Ascend G6",
"G527-U081": "Huawei Ascend G527",
"G526-L33": "Huawei Ascend G526",
"G525-U00": "Huawei Ascend G525",
"G510-0251": "Huawei Ascend G510",
"G510-0200": "Huawei Ascend G510",
"G510-0100": "Huawei Ascend G510",
"FRLM-TN00": "Huawei Enjoy 20 SE",
"FRLM-L22": "Huawei Enjoy 20 SE",
"FRLM-L03": "Huawei Enjoy 20 SE",
"FRLM-AN00A": "Huawei Enjoy 20 SE",
"FRD-L19": "Huawei Honor 8",
"FRD-L14": "Huawei Honor 8",
"FRD-L09": "Huawei HONOR 8",
"FRD-L04": "Huawei Honor 8",
"FRD-L02": "Huawei HONOR 8",
"FRD-DL00": "Huawei MediaPad T2 10.0 Pro",
"FRD-C00": "Huawei Honor 8",
"FRD-AL10": "Huawei Honor 8",
"FRD-AL00": "Huawei Honor 8",
"FLA-TL10": "Huawei Y9 (2018)",
"FLA-AL20": "Huawei Y9 2018",
"FLA-AL10": "Huawei Y9 2018",
"FLA-AL00": "Huawei Y9 2018",
"Figo-L31": "Huawei P Smart",
"FIG-TL10": "Huawei Enjoy 7S Dual",
"FIG-TL00": "Huawei P smart",
"FIG-LX3": "Huawei P Smart",
"FIG-LX2": "Huawei P Smart",
"FIG-LX1": "Huawei P Smart Dual SIM",
"FIG-LA1": "Huawei P Smart",
"FIG-L31": "Huawei P Smart",
"FIG-L22": "Huawei P Smart",
"FIG-L21": "Huawei P Smart",
"FIG-L11": "Huawei P Smart",
"FIG-L03": "Huawei P Smart",
"FIG-L02": "Huawei P Smart",
"FIG-AL10": "Huawei Enjoy 7S",
"FIG-AL00": "Huawei P smart",
"FDR-A05": "Huawei MediaPad T2 10.0 Pro",
"FDR-A04": "Huawei MediaPad T2 10.0 Pro",
"FDR-A03L": "Huawei M2",
"FDR-A03": "Huawei MediaPad T2 10.0 Pro",
"FDR-A01w": "Huawei MediaPad T2 10.0 Pro",
"FDR-A01": "Huawei MediaPad T2 10.0 Pro",
"EVR-TL00": "Huawei Mate 20 X",
"EVR-N29": "Huawei Mate 20 X (5G)",
"EVR-L29": "Huawei Mate 20 X",
"EVR-AN00": "Huawei Mate 20 X (5G)",
"EVR-AL00": "Huawei Mate 20 X",
"EVA-L29": "Huawei P9",
"EVA-L19": "Huawei P9",
"EVA-L09": "Huawei P9",
"EVA-DL00": "Huawei P9",
"EVA-CL00": "Huawei P9",
"EVA-C00": "Huawei P9",
"EVA-AL10": "Huawei P9",
"EVA-AL00": "Huawei P9",
"EML-L29": "Huawei P20",
"EML-L09": "Huawei P20",
"EML-AL00": "Huawei P20",
"ELS-TN00": "Huawei P40 Pro",
"ELS-N39": "Huawei P40 Pro+",
"ELS-N29": "Huawei P40 Pro+",
"ELS-N04": "Huawei P40 Pro",
"ELS-AN10": "Huawei P40 Pro+",
"ELS-AN00": "Huawei P40 Pro",
"ELE-TL00": "Huawei P30",
"ELE-L29": "Huawei P30",
"ELE-L09": "Huawei P30",
"ELE-L04": "Huawei P30",
"ELE-AL00": "Huawei P30",
"EDI-AL10": "Huawei Honor Note 8",
"EDGE-U00": "Huawei Ascend P6",
"EDGE-C00": "Huawei Ascend P6",
"EBG-TN00": "Honor 30 Pro",
"EBG-N19": "Honor 30 Pro+",
"EBG-AN10": "Honor 30 Pro+",
"EBG-AN00": "Honor 30 Pro",
"DVCM-TN20": "",
"DVCM-AN20": "Huawei Enjoy 20 Pro",
"DVCM-AN00": "Huawei Enjoy 20 Pro",
"DUK-TL30": "Huawei Honor V9",
"DUK-L09": "Huawei Honor 8 Pro",
"DUK-AL30": "Huawei Honor V9",
"DUK-AL20": "Huawei Honor V9",
"DUB-LX3": "Huawei Y7 Prime 2019",
"DUB-LX1": "Huawei Y7 Prime 2019",
"DUB-L01": "Huawei Y7 2019",
"DUB-AL00": "Huawei Enjoy 9",
"DUA-L29": "Honor 9S",
"DRA-LX5": "Huawei Y5 Lite (2018)",
"DRA-L29": "Huawei Y5p",
"DRA-L21": "Huawei Y5 Prime 2018",
"DRA-L09": "Huawei Y5p",
"DNN-L29": "Honor 10X Lite",
"Diego-TL10": "Huawei Enjoy 6S",
"Diego-L23": "Huawei Diego-L23",
"Diego-L21": "Huawei Honor 6C",
"Diego-L03": "Huawei Diego-L03",
"Diego-L01": "Huawei Diego-L01",
"Diego-AL00": "Huawei Diego-AL00",
"Delhi-TL20": "Huawei Honor 6A",
"Delhi-L42": "Huawei Honor 6A",
"Delhi-L22": "Huawei Honor 6A",
"Delhi-AL10": "Huawei Honor 6A",
"DAV-703": "Huawei P8 MAX",
"DAV-702L": "Huawei P8 max",
"DAV-701L": "Huawei P8 max",
"D2-0082": "Huawei Ascend D2",
"CUN-U29": "Huawei Y5 II",
"CUN-TL00": "Huawei Honor 5",
"CUN-L33": "Huawei Y5 II",
"CUN-L23": "Huawei Y5 II",
"CUN-L22": "Huawei Y5 II",
"CUN-L21": "Huawei Y5 II",
"CUN-L03": "Huawei Y5 II",
"CUN-L02": "Huawei Y5 II",
"CUN-L01": "Huawei Y5 II",
"CUN-AL00": "Huawei Honor 5",
"CRR-UL20": "Huawei Mate S",
"CRR-UL00": "Huawei Mate S",
"CRR-TL00": "Huawei Mate S",
"CRR-L13": "Huawei Mate S",
"CRR-L09": "Huawei Mate S",
"CRR-CL20": "Huawei Mate S",
"CRR-CL00": "Huawei Mate S",
"CRO-UL00": "Huawei Y3 2017",
"CRO-L03": "Huawei Y3 2017",
"CRO-L02": "Huawei Y3 2017",
"CPN-W09": "Huawei M3 Lite",
"CPN-L09": "Huawei MediaPad M3 Lite",
"CPN-AL00": "Huawei M3 Lite",
"COR-TL10": "Honor Play",
"COR-AL10": "Honor Play",
"COR-AL00": "Honor Play",
"COL-TL10": "Huawei Honor 10",
"COL-TL00": "Huawei Honor 10",
"COL-L29": "Huawei Honor 10",
"COL-AL10": "Honor 10",
"CND-AN00": "Huawei nova 7 SE 5G Youth",
"CMR-W19": "Huawei MediaPad M5 Pro 10.8",
"CMR-W09TWN": "Huawei MediaPad M5",
"CMR-W09": "Huawei MediaPad M5 10.8",
"CMR-AL19": "Huawei MediaPad M5 Pro 10.8",
"CMR-AL09": "Huawei MediaPad M5 10.8",
"CM990": "Huawei CM990",
"CLT-TL00": "Huawei P20 Pro",
"CLT-L29": "Huawei P20 Pro Dual SIM",
"CLT-L09": "Huawei P20 Pro Dual SIM",
"CLT-L04": "Huawei P20 Pro Dual SIM",
"CLT-AL01": "Huawei P20 Pro Dual SIM",
"CLT-AL00": "Huawei P20 Pro Dual SIM",
"CHM-UL00": "Huawei Honor 4C",
"CHM-U01": "Huawei Honor 4C",
"CHM-TL00H": "Huawei Honor 4C",
"CHM-TL00": "Huawei Honor 4C",
"CHM-CL00": "Huawei Honor 4C",
"CHL-AL60CH": "Huawei nova 8 SE",
"CherryPlus-TL00": "Huawei Honor 4X",
"CherryPlus-L23": "Huawei Honor 4X",
"CherryPlus-L12": "Huawei Honor 4X LTE",
"CherryPlus-L11": "Huawei HONOR 4X",
"Cherry-L04": "Huawei Honor 4X",
"Cherry-CL20": "Huawei Honor 4X",
"Cherry-CL10": "Huawei Honor 4X",
"CHE2-L12": "Huawei Honor 4X",
"Che2-L11": "Huawei Honor 4X",
"CHE1-L04": "Huawei Honor 4X",
"CHE1-CL20": "Huawei Honor 4X",
"CHE1-CL10": "Huawei Honor 4X",
"CHE-TL00H": "Huawei Honor 4x",
"CHE-TL00": "Huawei Honor 4X",
"Che-L11": "Huawei Honor 4X",
"CHC-U23": "Huawei G Play Mini",
"CHC-U03": "Huawei G Play mini",
"CHC-U01": "Huawei G Play Mini",
"CDY-TN90": "Honor 30S",
"CDY-TN20": "Huawei nova 7 SE",
"CDY-TN00": "Huawei nova 7 SE",
"CDY-N29H": "Huawei nova 7 SE",
"CDY-N29B": "Huawei nova 7 SE",
"CDY-N29": "Huawei nova 7 SE",
"CDY-AN95": "Huawei nova 7 SE",
"CDY-AN90": "Honor 30S",
"CDY-AN20": "Huawei nova 7 SE",
"CDY-AN00": "Huawei nova 7 SE",
"CDL-AN50": "Huawei nova 7 SE",
"CAZ-TL20": "Huawei Nova",
"CAZ-TL10": "Huawei Nova",
"CAZ-AL10": "Huawei Nova",
"Cannes-L12": "Huawei Nova",
"Cannes-L11": "Huawei Nova",
"Cannes-L01": "Huawei Nova",
"Cannes-AL10": "Huawei Nova Cannes-AL10",
"CAN-L13": "Huawei Nova",
"CAN-L12": "Huawei Nova",
"CAN-L11": "Huawei nova",
"CAN-L03": "Huawei Nova",
"CAN-L01": "Huawei Nova",
"Cameron-W19": "Huawei MediaPad M5 Pro 10.8",
"CAM-UL00": "Huawei Honor 5A",
"CAM-TL00": "Huawei Honor 5A",
"CAM-L23": "Huawei Y6 II",
"CAM-L21": "Huawei Y6 II",
"CAM-L03": "Huawei Y6 II Compact",
"CAM-AL00": "Huawei Honor 5A",
"CairoGO-L22": "Huawei CairoGO-L22",
"CairoGO-L02": "Huawei Y3 2018",
"Cairo-U00": "Huawei Cairo-U00",
"Cairo-L23": "Huawei Cairo-L23",
"Cairo-L22": "Huawei Cairo-L22",
"Cairo-L03": "Huawei Cairo-L03",
"Cairo-L02": "Huawei Cairo-L02",
"CAG-L02": "Huawei Y3 2018",
"C8860V": "Huawei Honor",
"C8817E": "Huawei C8817E",
"C8817D": "Huawei Honor 6 Pro",
"C8816D": "Huawei C8816D",
"C8816": "Huawei C8816",
"C199s": "Huawei C199S",
"BZT3-W59": "Huawei C5 10.4",
"BZT3-W09": "",
"BZT3-AL00": "Honor 5c",
"BZT-W09": "Huawei MediaPad C5 10.1",
"BZD-W00": "Huawei MediaPad C3",
"BZD-AL00": "Huawei MediaPad C3",
"BZC-W00": "",
"BZC-AL00": "",
"BTV-W09": "Huawei M3",
"BTV-DL09": "Huawei MediaPad M3",
"BRQ-AN00CG": "Huawei nova 8 Pro 4G",
"BRQ-AN00": "Huawei nova 8 Pro 5G",
"BRQ-AL00": "Huawei nova 8 Pro 5G",
"Bond-L24": "Huawei Honor 7X",
"BOND-L21": "Huawei Honor 7X",
"BND-TL10": "Huawei Honor 7X",
"BND-L34": "Huawei Mate SE",
"BND-L31A": "Huawei Honor 7X",
"BND-L31": "Huawei Honor 7X",
"BND-L24A": "Huawei Honor 7x",
"BND-L21": "Huawei Honor 7X",
"BND-AL10": "Huawei Honor 7X",
"BND-AL00": "Huawei Honor 7X",
"BMH-TN10": "Honor 30",
"BMH-N19": "Honor 30",
"BMH-AN20": "Honor 30",
"BMH-AN10": "Honor 30",
"BLN-TL10": "Huawei Honor 6X",
"BLN-TL00": "Huawei Honor 6X",
"BLN-L24": "Huawei Honor 6X",
"BLN-L22HN": "Huawei Honor 6X",
"BLN-L22": "Huawei Honor 6X",
"BLN-L21": "Huawei Honor 6X",
"BLN-AL40": "Huawei Honor 6X",
"BLN-AL30": "Huawei Honor 6X",
"BLN-AL20": "Huawei Honor 6X",
"BLN-AL10": "Huawei Honor 6X",
"BLL-L23": "Huawei Mate 9 Lite",
"BLL-L22": "Huawei GR5 2017",
"BLL-L21": "Huawei GR5 2017",
"BLA-TL00": "Huawei Mate 10 Pro",
"BLA-L29": "Huawei Mate 10 Pro",
"BLA-L09": "Huawei Mate 10 pro",
"BLA-AL00": "Huawei Mate 10 pro",
"BLA-A09": "Huawei Mate 10 pro",
"BKL-TL10": "Huawei Honor View 10",
"BKL-L09": "Huawei Honor View 10 Global",
"BKL-L04": "Huawei Honor View 10",
"BKL-AL20": "Huawei Honor V10",
"BKL-AL00": "Huawei Honor V10",
"BKK-TL00": "Huawei Honor 8C",
"BKK-L22": "Huawei Honor 8C",
"BKK-L21": "Huawei Honor 8C",
"BKK-AL10": "Huawei Honor 8C",
"BKK-AL00": "Honor 8C",
"BGO-L03": "Huawei MediaPad T2 7.0",
"BGO-DL09": "Huawei MediaPad T2 7.0",
"BG2-W09": "Huawei MediaPad T3",
"BG2-U03": "Huawei MediaPad T3",
"BG2-U01": "Huawei MediaPad T3 7 3G",
"Berlin-L23": "Huawei Honor 6X",
"Berlin-L22": "Huawei GR5 2017",
"Berlin-L21HN": "Huawei Honor 6X",
"Berlin-L21": "Huawei Honor 6X",
"Berkeley-LGRP2": "Huawei Honor V10",
"Barca-L22": "Huawei Barca-L22",
"Barca-L21": "Huawei Nova 2 Plus",
"Barca-L03": "Huawei Nova 2 Plus",
"BAH3-W59": "Huawei MatePad 10.4",
"BAH3-W09": "Huawei MatePad 10.4",
"BAH3-L09": "Huawei MatePad 10.4",
"BAH3-AN10": "Huawei MatePad 5G",
"BAH3-AL00": "Huawei MatePad 10.4",
"BAH2-W19": "Huawei MediaPad M5 lite",
"BAH2-W09": "Huawei MediaPad M5 lite",
"BAH2-L09": "Huawei MediaPad M5 Lite",
"BAH2-AL10": "Huawei MediaPad M5 lite",
"BAH-W09": "Huawei M3 Lite",
"BAH-L09": "Huawei MediaPad M3 Lite 10",
"BAH-AL00": "Huawei M3 Lite",
"BAC-TL00": "Huawei nova 2 plus",
"BAC-L23": "Huawei nova 2 plus",
"BAC-L22": "Huawei nova 2 plus",
"BAC-L21": "Huawei nova 2 plus",
"BAC-L03": "Huawei nova 2 plus",
"BAC-AL00": "Huawei Nova 2 Plus",
"AUM-L41": "Huawei Honor 7C (Enjoy 8)",
"AUM-L29": "Huawei Honor 7A Pro",
"ATU-LX3": "Huawei Y6 2018",
"ATU-L42": "Huawei Y6 Prime 2018",
"ATU-L22": "Huawei Y6 2018",
"ATU-L21": "Huawei Y6 2018",
"ATU-L11": "Huawei Y6 2018",
"ATU-L03": "Huawei Y6 2018",
"ATU-AL10": "Huawei Enjoy 8e",
"Atomu-L21": "Huawei Y6 Prime 2018",
"Atomu-L03": "Huawei Honor 7A",
"Atomu-AL20IND": "Huawei Honor 7A",
"ATH-UL06": "Huawei ShotX",
"ATH-UL01": "Huawei ShotX",
"ATH-UL00": "Huawei Honor 7i",
"ATH-TL00": "Huawei Honor 7i",
"ATH-CL00": "Huawei Honor 7i",
"ATH-AL00": "Huawei Honor 7i",
"ASKH-TL00": "Honor Play 3",
"ASKH-AL00": "Honor Play 3",
"ARTH-TL00": "Huawei Enjoy 10",
"ARTH-L29N": "Huawei Y7p",
"ARTH-L29": "Huawei Y7p",
"ARTH-L28": "Huawei Y7p",
"ARTH-L09": "Huawei Enjoy 10",
"ARTH-L08": "Huawei Enjoy 10",
"ARTH-AL00M": "Huawei Enjoy 10",
"ARTH-AL00": "Huawei Enjoy 10",
"ARS-TL00": "Huawei Enjoy 9 Max",
"ARS-L22": "Huawei Y Max",
"Ares-L22HW": "Huawei Y Max",
"ARE-TL00": "Huawei Honor 8X Max",
"ARE-L22HN": "Huawei Honor 8X Max",
"AQM-TL00": "Huawei Enjoy 10s",
"AQM-L21A": "Huawei Y8P",
"AQM-L01": "Huawei Y8p",
"AQM-AL10HN": "Honor Play 4T Pro",
"AQM-AL00": "Huawei Enjoy 10s",
"ANG-AN00": "Huawei nova 8 5G",
"ANE-TL00": "Huawei P20 lite",
"ANE-LX3": "Huawei P20 Lite",
"ANE-LX2JOT": "Huawei P20 Lite",
"ANE-LX2J": "Huawei P20 Lite",
"ANE-LX2": "Huawei Nova 3e",
"ANE-LX1": "Huawei P20 Lite",
"ANE-LGRP1": "Huawei P20 Lite",
"ANE-L21": "Huawei P20 Lite",
"ANE-L12JPZ": "Huawei Nova 3e",
"ANE-L12": "Huawei Nova 3e",
"ANE-L03": "Huawei Nova 3e",
"ANE-L02J": "Huawei Nova 3e",
"ANE-L02": "Huawei Nova 3e",
"ANE-AL00I": "Huawei P20 Lite",
"ANE-AL00": "Huawei P20 Lite",
"ANA-TN00": "Huawei P40",
"ANA-N29": "Huawei P40",
"ANA-L04": "Huawei P40",
"ANA-AN00": "Huawei P40",
"ANA-AL00": "Huawei P40 4G",
"AMN-L29": "Huawei Y5 (2019)",
"AMN-L22": "Huawei Y5 (2019)",
"AMN-L09": "Huawei Y5 (2019)",
"ALP-TL00ZZB51": "Huawei Mate 10",
"ALP-TL00B": "Huawei Mate 10",
"ALP-TL00": "Huawei Mate 10",
"ALP-LGRP2": "Huawei Mate 10",
"ALP-LGRP1": "Huawei Mate 10",
"ALP-L29": "Huawei Mate 10",
"ALP-L09": "Huawei Mate 10",
"ALP-AL00ZZB54": "Huawei Mate 10",
"ALP-AL00ZZB02": "Huawei Mate 10",
"ALP-AL00": "Huawei Mate 10",
"ALE-TL00": "Huawei P8 Lite",
"ALE-L32": "Huawei P8 Lite",
"ALE-L23URY": "Huawei P8 Lite",
"ALE-L23": "Huawei P8 Lite",
"ALE-L21TUR": "Huawei P8 Lite",
"ALE-L21S": "Huawei P8 Lite",
"ALE-L21POL": "Huawei P8 Lite",
"ALE-L21MKD": "Huawei P8 Lite",
"ALE-L21HUN": "Huawei P8 Lite",
"ALE-L21HR": "Huawei P8 Lite",
"ALE-L21GR": "Huawei P8 Lite",
"ALE-L21FRA": "Huawei P8 Lite",
"ALE-L21DEU": "Huawei P8 Lite",
"ALE-L21AUT": "Huawei P8 Lite",
"ALE-L21": "Huawei P8 Lite",
"ALE-L03": "Huawei P8 Lite",
"ALE-L02": "Huawei P8 Lite",
"ALE-L01": "Huawei P8 Lite",
"ALE-CL00": "Huawei P8 Lite",
"AKA-L29": "Honor Play 4T",
"AKA-AL20": "Honor Play 4T",
"AKA-AL10": "Honor Play 4T",
"AGS3K-W10": "Huawei MatePad T 10s",
"AGS3K-W09": "Huawei MatePad T 10s",
"AGS3K-L09": "Huawei MatePad T 10s",
"AGS3-W09HN": "Huawei Enjoy Tablet 2",
"AGS3-W09": "Huawei MatePad T 10s",
"AGS3-W00E": "Huawei Enjoy Tablet 2",
"AGS3-W00D": "Huawei Enjoy Tablet 2",
"AGS3-W00B": "Huawei Enjoy Tablet 2",
"AGS3-L09": "Huawei MatePad T 10s",
"AGS3-AL09HN": "Huawei Enjoy Tablet 2",
"AGS3-AL00": "Huawei Enjoy Tablet 2",
"AGS2-W09HN": "Huawei MediaPad T5",
"AGS2-W09AUS": "Huawei MediaPad T5",
"AGS2-W09": "Huawei MediaPad T5",
"AGS2-L09": "Huawei MediaPad T5",
"AGS2-L03": "Huawei MediaPad T5",
"AGS2-AL00HN": "Huawei MediaPad T5",
"AGS2-AL00": "Honor Pad 5 10.1",
"AGS-W09": "Huawei MediaPad T3 10",
"AGS-L09": "Huawei MediaPad T3 10",
"AGRK-W09K": "Huawei MatePad T 10s",
"AGRK-W09": "Huawei AGRK-W09",
"AGRK-L09K": "Huawei MatePad T 10s",
"AGRK-L09": "Huawei MatePad T 10s",
"AGR-W09K": "Honor Pad X6",
"AGR-W09HN": "Huawei Enjoy Tablet 2",
"AGR-W09": "Honor Pad X6",
"AGR-L09": "Huawei MatePad T 10s",
"AGR-AL09HN": "Honor Pad X6",
"7D-504L": "Huawei MediaPad X1 7.0",
"7D-501u": "Huawei MediaPad X1 7.0",
"7D-501L": "Huawei MediaPad X1 7.0",
"704HW": "Huawei Nova Lite 2",
"608HW": "Huawei nova lite",
"NOP-AN01P": "Huawei Mate 40 Pro+",
"NOP-AN00P": "Huawei Mate 40 Pro+",
"NOP-AN00": "Huawei Mate 40 Pro+",
"NOH-N29": "Huawei Mate 40 Pro",
"NOH-AN01": "Huawei Mate 40 Pro",
"NOH-AN00": "Huawei Mate 40 Pro",
"NMO-L31": "Huawei GT3",
"NMO-L22": "Huawei GT3",
"NMO-L02": "Huawei NMO-L02",
"NICE-TL10": "Huawei Nice-TL10",
"NICE-AL10": "Huawei Nice-AL10",
"NICE-AL00": "Huawei Nice-AL00",
"NEO-L29": "Huawei Mate RS",
"NEN-L23CQ": "Huawei nova 8 5G",
"NEN-L22CQ": "Huawei nova 8 5G",
"NEN-L21CQ": "Huawei nova 8 5G",
"NEN-L03CQ": "Huawei nova 8 5G",
"NEN-L01CQ": "Huawei nova 8 5G",
"NEM-UL10": "Huawei Honor 5C",
"NEM-TL00": "Huawei Honor 5C",
"NEM-L51": "Huawei Honor 5C",
"NEM-L22": "Huawei Honor 5C",
"NEM-L21": "Huawei HONOR 7 Lite",
"NEM-AL10": "Huawei Honor 5C",
"MXWM-TN00": "Honor 30 Youth",
"MXWM-AN00": "Honor 30 Youth",
"MT7-UL00": "Huawei Ascend Mate 7",
"MT7-TL10": "Huawei Ascend Mate7",
"MT7-TL00": "Huawei Mate 7",
"MT7-L11": "Huawei Ascend Mate7",
"MT7-L09": "Huawei Ascend Mate7",
"MT7-J1": "Huawei Ascend Mate 7",
"MT7-CL00": "Huawei Ascend Mate 7",
"MT2-L05": "Huawei Ascend Mate2",
"MT1-U06": "Huawei Ascend Mate7",
"MT-L09": "Huawei Ascend Mate7",
"MRX-W39": "Huawei MatePad Pro",
"MRX-W29": "Huawei MatePad Pro",
"MRX-W19": "Huawei MatePad Pro",
"MRX-W09": "Huawei MatePad Pro",
"MRX-AN19": "Huawei MatePad Pro 5G",
"MRX-AL19": "Huawei MatePad Pro",
"MRX-AL09": "Huawei MatePad Pro",
"MRD-TL00": "Huawei Enjoy 9e",
"MRD-LX3": "Huawei Y6 2019",
"MRD-L41A": "Huawei Y6 (2019)",
"MRD-L41": "Huawei Y6 2019",
"MRD-L23": "Huawei Y6 2019",
"MRD-L22": "Huawei Y6 Pro (2019)",
"MRD-L21A": "Huawei Y6 Pro (2019)",
"MRD-L21": "Huawei Y6 2019",
"MRD-L11": "Huawei Y6 2019",
"MRD-L01": "Huawei Y6 2019",
"MRD-AL00": "Huawei Enjoy 9e",
"MOA-TL00": "Honor Play 9A",
"MOA-L49I": "Honor Play 9A",
"MOA-L49": "Honor 9A",
"MOA-AL20": "Honor Play 9A",
"MOA-AL00": "Honor Play 9A",
"MLA-UL00": "Huawei G9 Plus",
"MLA-TL10": "Huawei G9 Plus",
"MLA-TL00": "Huawei G9 Plus",
"MLA-L13": "Huawei nova plus",
"MLA-L12": "Huawei nova plus",
"MLA-L11": "Huawei nova plus",
"MLA-L03": "Huawei Nova plus",
"MLA-L02": "Huawei Nova Plus",
"MLA-L01": "Huawei Nova Plus",
"MLA-AL10": "Huawei Nova Plus",
"MLA-AL00": "Huawei Maimang 5",
"MHA-TL00": "Huawei Mate 9",
"MHA-L29": "Huawei Mate 9",
"MHA-L09": "Huawei Mate 9",
"MHA-AL00": "Huawei Mate 9 Pro",
"MED-TL00": "Huawei Enjoy 10",
"MED-L49": "Huawei Y6p",
"MED-L29II": "Honor 9A",
"MED-L29": "Honor 9A",
"MED-L09": "Huawei Y6p",
"MED-AL20": "Honor Play 9A",
"MED-AL10": "Honor Play 9A",
"MED-AL00": "Honor Play 9A",
"Maya-U29": "Huawei Honor Maya",
"Maya-TL10": "Huawei Honor Maya",
"Maya-L41": "Huawei Y6 2017",
"Maya-L13": "Huawei Honor Maya",
"Maya-L11": "Huawei Y6 2017",
"Maya-L03": "Huawei Maya L03",
"Maya-AL10": "Huawei Honor Maya",
"MAR-TL00": "Huawei nova 4e",
"MAR-L22BX": "Huawei P30 lite",
"MAR-L22B": "Huawei P30 lite",
"MAR-L22A": "Huawei P30 lite",
"MAR-L21MEB": "Huawei P30 lite",
"MAR-L21MEA": "Huawei P30 lite",
"MAR-L21H": "Huawei P30 lite",
"MAR-L21B": "Huawei P30 lite",
"MAR-L21A": "Huawei P30 lite",
"MAR-L03A": "Huawei P30 lite",
"MAR-L01MEB": "Huawei P30 lite",
'2014215': 'Xiaomi Mi 4',
'2014712': 'Xiaomi Redmi Note',
'2014817': 'Xiaomi Redmi 2',
'2014818': 'Xiaomi Redmi 2',
'2015015': 'Xiaomi Mi 4i',
'2015051': 'Xiaomi Redmi Note 2',
'2015105': 'Xiaomi Mi 5',
'2015116': 'Xiaomi Redmi Note 3',
'2015161': 'Xiaomi Redmi Note 3',
'2015213': 'Xiaomi Mi Note 2',
'2015711': 'Xiaomi Mi 5s',
'2015816': 'Xiaomi Redmi 3',
'2016001': 'Xiaomi Mi Max',
'2016002': 'Xiaomi Mi Max',
'2016007': 'Xiaomi Mi Max',
'2016031': 'Xiaomi Redmi 3s',
'2016060': 'Xiaomi Redmi 4 (4X)',
'2016070': 'Xiaomi Mi 5s Plus',
'2016090': 'Xiaomi Redmi 4 (4X)',
'2016100': 'Xiaomi Redmi Note 4',
'2016117': 'Xiaomi Redmi 4A',
'AWM-A0': 'Xiaomi Black Shark Helo',
'DLT-A0': 'Xiaomi Black Shark 2 Pro',
'DLT-H0': 'Xiaomi Black Shark 2 Pro',
'M1803D5XA': 'Xiaomi Mi Mix 2S',
'M1803E1A': 'Xiaomi Mi 8',
'M1803E6G': 'Xiaomi Redmi S2 (Redmi Y2)',
'M1803E6H': 'Xiaomi Redmi S2 (Redmi Y2)',
'M1803E6I': 'Xiaomi Redmi S2 (Redmi Y2)',
'M1803E7SG': 'Xiaomi Redmi Note 5 AI Dual Camera',
'M1803E7SH': 'Xiaomi Redmi Note 5 AI Dual Camera',
'M1804C3CG': 'Xiaomi Redmi 6A',
'M1804C3CH': 'Xiaomi Redmi 6A',
'M1804C3CI': 'Xiaomi Redmi 6A',
'M1804C3DG': 'Xiaomi Redmi 6',
'M1804C3DH': 'Xiaomi Redmi 6',
'M1804C3DI;': 'Xiaomi Redmi 6',
'M1804D2SG': 'Xiaomi Mi A2 (Mi 6X)',
'M1804D2SI': 'Xiaomi Mi A2 (Mi 6X)',
'M1804E4A': 'Xiaomi Mi Max 3',
'M1805D1SG': 'Xiaomi Mi A2 Lite (Redmi 6 Pro)',
'M1805E10A': 'Xiaomi Pocophone F1',
'M1806E7TG': 'Xiaomi Redmi Note 6 Pro',
'M1806E7TH': 'Xiaomi Redmi Note 6 Pro',
'M1806E7TI': 'Xiaomi Redmi Note 6 Pro',
'M1807E8A': 'Xiaomi Mi 8 Pro',
'M1808D2TG': 'Xiaomi Mi 8 Lite',
'M1810F6LG': 'Xiaomi Redmi 7',
'M1810F6LH': 'Xiaomi Redmi 7',
'M1810F6LI': 'Xiaomi Redmi 7',
'M1901F71': 'Xiaomi Redmi Note 7S',
'M1901F7G': 'Xiaomi Redmi Note 7',
'M1901F7H': 'Xiaomi Redmi Note 7',
'M1901F7I': 'Xiaomi Redmi Note 7',
'M1901F7S': 'Xiaomi Redmi Note 7 Pro',
'M1901F9E': 'Xiaomi Mi Play',
'M1902F1G': 'Xiaomi Mi 9',
'M1903C3EG': 'Xiaomi Redmi 7A',
'M1903C3EH': 'Xiaomi Redmi 7A',
'M1903C3EI': 'Xiaomi Redmi 7A',
'M1903C3GG': 'Xiaomi Redmi Go',
'M1903C3GH': 'Xiaomi Redmi Go',
'M1903C3GI': 'Xiaomi Redmi Go',
'M1903F10G': 'Xiaomi Mi 9T',
'M1903F10I': 'Xiaomi Redmi K20',
'M1903F11G': 'Xiaomi Mi 9T Pro',
'M1903F2G': 'Xiaomi Mi 9 SE',
'M1904F3BG': 'Xiaomi Mi 9 Lite',
'M1906F9SH': 'Xiaomi Mi A3',
'M1906F9SI': 'Xiaomi Mi A3',
'M1906G7G': 'Xiaomi Redmi Note 8 Pro',
'M1906G7I': 'Xiaomi Redmi Note 8 Pro',
'M1908C3JG': 'Xiaomi Redmi Note 8',
'M1908C3JH': 'Xiaomi Redmi Note 8',
'M1908C3JI': 'Xiaomi Redmi Note 8',
'M1908C3KG': 'Xiaomi Redmi 8A',
'M1908C3KH': 'Xiaomi Redmi 8A',
'M1908C3XG': 'Xiaomi Redmi Note 8T',
'M1910F4E': 'Xiaomi Mi CC9 Pro',
'M1910F4G': 'Xiaomi Mi Note 10 Lite',
'M1910F4S': 'Xiaomi Mi Note 10 Pro',
'M1912G7BC': 'Xiaomi Redmi K30',
'M1912G7BE': 'Xiaomi Redmi K30',
'M2001C3K3I': 'Xiaomi Redmi 8A Dual',
'M2001J1G': 'Xiaomi Mi 10 Pro 5G',
'M2001J2G': 'Xiaomi Mi 10 5G',
'M2001J2I': 'Xiaomi Mi 10 5G',
'M2002F4LG': 'Xiaomi Mi Note 10 Lite',
'M2002J9E': 'Xiaomi Mi 10 Youth 5G',
'M2002J9G': 'Xiaomi Mi 10 Lite 5G',
'M2003J15SC': 'Xiaomi Redmi 10X 4G',
'M2003J15SG': 'Xiaomi Redmi Note 9',
'M2003J15SS': 'Xiaomi Redmi Note 9',
'M2003J6A1G': 'Xiaomi Redmi Note 9S',
'M2003J6A1I': 'Xiaomi Redmi Note 9 Pro (India)',
'M2003J6B1I': 'Xiaomi Redmi Note 9 Pro Max',
'M2004C3MI': 'Xiaomi Redmi 9 (India)',
'M2004J11G': 'Xiaomi Poco F2 Pro',
'M2004J19C': 'Xiaomi Redmi 9',
'M2004J19G': 'Xiaomi Redmi 9',
'M2010J19SI': 'Xiaomi Redmi 9 Power',
'M2004J19PI': 'Xiaomi Poco M2',
'M2004J7AC': 'Xiaomi Redmi Note 10',
'M2101K6I': 'Xiaomi Redmi Note 10 Pro Max',
'M2103K19G': 'Xiaomi Redmi Note 10 5G',
'M2004J7BC': 'Xiaomi Redmi 10X Pro 5G',
'M2006C3LC': 'Xiaomi Redmi 9A',
'M2006C3LG': 'Xiaomi Redmi 9A',
'M2006C3LI': 'Xiaomi Redmi 9A',
'M2006C3LII': 'Xiaomi Redmi 9i',
'M2006C3LVG': 'Xiaomi Redmi 9AT',
'M2006C3MG': 'Xiaomi Redmi 9C',
'M2006C3MII': 'Xiaomi Redmi 9 (India)',
'M2006C3MNG': 'Xiaomi Redmi 9C NFC',
'M2006J10C': 'Xiaomi Redmi K30 Ultra',
'M2007J17C': 'Xiaomi Redmi Note 9 Pro 5G',
'M2007J17G': 'Xiaomi Mi 10T Lite 5G',
'M2007J17I': 'Xiaomi Mi 10i',
'M2007J1SC': 'Xiaomi Mi 10 Ultra',
'M2007J20CG': 'Xiaomi Poco X3 NFC',
'M2007J20CI': 'Xiaomi Poco X3',
'M2007J20CT': 'Xiaomi Poco X3 NFC',
'M2007J22C': 'Xiaomi Redmi Note 9 5G',
'M2007J3SC': 'Xiaomi Redmi K30S',
'M2007J3SG': 'Xiaomi Mi 10T Pro 5G',
'M2007J3SY': 'Xiaomi Mi 10T 5G',
'M2010J19CG': 'Xiaomi Poco M3',
'M2010J19CI': 'Xiaomi Poco M3',
'MAE136': 'Xiaomi Redmi 4 (4X)',
'MAG138': 'Xiaomi Redmi 4 (4X)',
'MCE16': 'Xiaomi Mi 6',
'MCE8': 'Xiaomi Mi Note 3',
'MCG3B': 'Xiaomi Redmi 5A',
'MCI3B': 'Xiaomi Redmi 5A',
'MDE40': 'Xiaomi Mi Max 2',
'MDE5': 'Xiaomi Mi Mix 2',
'MDG1': 'Xiaomi Redmi 5',
'MDG2': 'Xiaomi Mi A1 (Mi 5X)',
'MDI1': 'Xiaomi Redmi 5',
'MDI2': 'Xiaomi Mi A1 (Mi 5X)',
'MDI40': 'Xiaomi Mi Max 2',
'MDI6': 'Xiaomi Redmi Y1 Lite',
'MDI6S': 'Xiaomi Redmi Y1 (Note 5A)',
'MEG7': 'Xiaomi Redmi 5 Plus (Redmi Note 5)',
'MEI7': 'Xiaomi Redmi Note 5 Pro',
'MEI7S': 'Xiaomi Redmi Note 5 Pro',
'MZB07QAIN': 'Xiaomi Poco C3',
'MZB07RHIN': 'Xiaomi Poco C3',
'MZB07RIIN': 'Xiaomi Poco C3',
'MZB07RJIN': 'Xiaomi Poco C3',
'MZB07RKIN': 'Xiaomi Poco C3',
'MZB07RLIN': 'Xiaomi Poco C3',
'MZB07Z0IN': 'Xiaomi Poco X3',
'MZB07Z1IN': 'Xiaomi Poco X3',
'MZB07Z2IN': 'Xiaomi Poco X3',
'MZB07Z3IN': 'Xiaomi Poco X3',
'MZB07Z4IN': 'Xiaomi Poco X3',
'MZB7995IN': 'Xiaomi Redmi 7A',
'MZB8458IN': 'Xiaomi Redmi 8A',
'MZB8741IN': 'Xiaomi Poco X2',
'MZB8742IN': 'Xiaomi Poco X2',
'MZB8743IN': 'Xiaomi Poco X2',
'MZB8744IN': 'Xiaomi Poco X2',
'MZB8745IN': 'Xiaomi Poco X2',
'MZB8746IN': 'Xiaomi Poco X2',
'MZB9011IN': 'Xiaomi Poco X2',
'MZB9012IN': 'Xiaomi Poco X2',
'MZB9013IN': 'Xiaomi Poco X2',
'MZB9919IN': 'Xiaomi Poco M2',
'MZB9965IN': 'Xiaomi Poco X3',
'SHARK MBU-A0': 'Xiaomi Black Shark 3 Pro',
'SHARK MBU-H0': 'Xiaomi Black Shark 3 Pro',
'SKW-A0': 'Xiaomi Black Shark 2',
'SKW-H0': 'Xiaomi Black Shark 2',
"RMX1931": "Realme X",
"RMX1901": "Realme X",
"RMX1941": "Realme C2",
"RMX2156": "Realme Narzo 30",
"RMX3360": "Realme GT Master Edition",
"RMX1851": "Realme 3 Pro",
"RMX2030": "Realme 5i",
"BBG100-1": "BlackBerry Evolve",
"ASUS_X00TD": "ASUS Zenfone Max Pro M1",
"ASUS_Z017DB": "ASUS Zenfone 3",
"ASUS_X00HD": "Asus Zenfone 4 Max",
"ASUS_X00TDA": "ASUS Zenfone Max Pro M1",
"ASUS_I01WD": "Asus Zenfone 6",
"ASUS_Z01RD": "Asus Zenfone 5Z",
"ZS630KL": "Asus Zenfone 6",
"I01WD": "Asus Zenfone 6",
"V2037": "vivo Y20G",
"I2012": "vivo"
}
| true | true |
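The record above is a flat model-code to marketing-name table. A minimal lookup sketch (the helper name and the normalization rules are assumptions, not part of the record; note that a handful of values are empty strings, so a fallback is worthwhile):

def device_name(model_code, table):
    # Exact key match first; keys in the table above are case-sensitive model strings.
    name = table.get(model_code)
    if name:
        return name.strip()
    # Fall back to a case-insensitive scan, then to the raw code itself.
    wanted = model_code.strip().lower()
    for code, value in table.items():
        if code.lower() == wanted and value:
            return value.strip()
    return model_code  # unknown or empty entries pass through unchanged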
1c4a31324e044f38931eecf708b21caa5af9f43f | 1,130 | py | Python | Esophagus/genMaskedImg.py | mintanwei/artificial-intelligence-for-classification-and-segmentation-of-esophagus-precancerous-lesions | bb7bdc06f2426e99fb16f17bc081993e55db9a81 | [
"Apache-2.0"
] | null | null | null | Esophagus/genMaskedImg.py | mintanwei/artificial-intelligence-for-classification-and-segmentation-of-esophagus-precancerous-lesions | bb7bdc06f2426e99fb16f17bc081993e55db9a81 | [
"Apache-2.0"
] | null | null | null | Esophagus/genMaskedImg.py | mintanwei/artificial-intelligence-for-classification-and-segmentation-of-esophagus-precancerous-lesions | bb7bdc06f2426e99fb16f17bc081993e55db9a81 | [
"Apache-2.0"
] | null | null | null | import skimage.io
import os
import numpy as np
import cv2
input_img_dir = './dataset/endoscope/val/img'
input_mask_dir = './result/multi_task_99/mask_dialated3_6_1/128.0'
output_dir = './128.0_output'
T = 0.5
L = '_2'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
overlay_color_mask = [255, 0, 0]
transparency = 0
transparency = 1 - transparency
names = os.listdir(input_img_dir)
for name in names:
name = os.path.splitext(name)
    # Read via np.fromfile + imdecode so paths with non-ASCII characters work on Windows.
    img = cv2.imdecode(np.fromfile(os.path.join(input_img_dir, name[0] + name[1]), dtype=np.uint8), -1)
mask = cv2.imdecode(np.fromfile(os.path.join(input_mask_dir, name[0] + L + name[1]), dtype=np.uint8), -1)
ret_mask, binary_mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)
    contours_mask, hierarchy_mask = cv2.findContours(binary_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]  # [-2:] keeps this working on both OpenCV 3.x (3 return values) and 4.x (2)
cv2.drawContours(img, contours_mask, -1, overlay_color_mask, 3)
cv2.imencode('.png', img)[1].tofile(os.path.join(output_dir, name[0] + L + name[1])) | 34.242424 | 119 | 0.69646 | import os
import numpy as np
import cv2
input_img_dir = './dataset/endoscope/val/img'
input_mask_dir = './result/multi_task_99/mask_dialated3_6_1/128.0'
output_dir = './128.0_output'
T = 0.5
L = '_2'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
overlay_color_mask = [255, 0, 0]
transparency = 0
transparency = 1 - transparency
names = os.listdir(input_img_dir)
for name in names:
name = os.path.splitext(name)
    img = cv2.imdecode(np.fromfile(os.path.join(input_img_dir, name[0] + name[1]), dtype=np.uint8), -1)
mask = cv2.imdecode(np.fromfile(os.path.join(input_mask_dir, name[0] + L + name[1]), dtype=np.uint8), -1)
ret_mask, binary_mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)
    contours_mask, hierarchy_mask = cv2.findContours(binary_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
cv2.drawContours(img, contours_mask, -1, overlay_color_mask, 3)
cv2.imencode('.png', img)[1].tofile(os.path.join(output_dir, name[0] + L + name[1])) | true | true |
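The script above deliberately avoids cv2.imread/cv2.imwrite and round-trips through np.fromfile + cv2.imdecode on the way in and cv2.imencode + tofile on the way out; that is the standard workaround for OpenCV's poor handling of non-ASCII paths on Windows. A reusable sketch of the same pattern (helper names are illustrative):

import os
import numpy as np
import cv2

def imread_unicode(path, flags=cv2.IMREAD_UNCHANGED):
    # np.fromfile accepts arbitrary unicode paths; imdecode parses the raw bytes.
    data = np.fromfile(path, dtype=np.uint8)
    return cv2.imdecode(data, flags)

def imwrite_unicode(path, image):
    # imencode yields an in-memory buffer that tofile can write to any path.
    ext = os.path.splitext(path)[1] or ".png"
    ok, buf = cv2.imencode(ext, image)
    if not ok:
        raise IOError("could not encode image for " + path)
    buf.tofile(path)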
1c4a321cc9d033f7439e19db2a7247d1847aba3d | 3,334 | py | Python | api/group.py | D3AdCa7/CTF-Platform | 1b4b66f3a5f25f69dcd53d233718276607bed8ac | [
"MIT"
] | 4 | 2016-03-15T14:29:13.000Z | 2019-03-26T09:39:19.000Z | api/group.py | D3AdCa7/CTF-Platform | 1b4b66f3a5f25f69dcd53d233718276607bed8ac | [
"MIT"
] | null | null | null | api/group.py | D3AdCa7/CTF-Platform | 1b4b66f3a5f25f69dcd53d233718276607bed8ac | [
"MIT"
] | 3 | 2016-03-15T14:28:32.000Z | 2019-01-28T06:05:56.000Z | __author__ = "Collin Petty"
__copyright__ = "Carnegie Mellon University"
__license__ = "MIT"
__maintainer__ = ["Collin Petty", "Peter Chapman"]
__credits__ = ["David Brumely", "Collin Petty", "Peter Chapman", "Tyler Nighswander", "Garrett Barboza"]
__email__ = ["[email protected]", "[email protected]"]
__status__ = "Production"
from common import db
import common
def get_group_membership(tid):
"""Get the group membership for a team.
    Find all groups that the tid owns, then add all groups in which it is only a member.
"""
groups = list()
owners = set()
for g in list(db.groups.find({'owners': tid}, {'name': 1, 'gid': 1})):
groups.append({'name': g['name'],
'gid': g['gid'],
'owner': True})
owners.add(g['gid'])
groups += filter(lambda g: g['gid'] not in owners,
({'name': g['name'],
'gid': g['gid'],
'owner': False} for g in list(db.groups.find({'members': tid}, {'name': 1, 'gid': 1}))))
return groups
def create_group(tid, gname):
"""Create a new group.
    Get a group name posted from a logged-in user. Check whether the group exists; if it does, notify the user.
    If the group does not exist, create it and add the user as a member and owner.
"""
if gname == '':
return {'status': 0, 'message': "The group name cannot be empty!"}
if db.groups.find_one({'name': gname}) is not None:
return {'status': 2, 'message': "This group exists, would you like to join it?"}
db.groups.insert({"name": gname, "owners": [tid], "members": [tid], "gid": common.token()})
return {'status': 1, 'message': "Successfully created the group"}
def join_group(tid, gname):
"""Join a group.
    Get a group name posted from a logged-in user. Error if the name is empty. Search the db for the non-empty
    group name; if no group with that name exists, an error is returned. If a group is found, query the db to see
    whether the user is already a member or owner; if either, error. If nothing has errored so far, add the user
    as a member of the group and return status=1 for success.
"""
if gname == '':
return {'status': 0, 'message': "The group name cannot be empty!"}
group = db.groups.find_one({'name': gname})
if group is None:
return {'status': 3, 'message': "Cannot find group '%s', create it?" % gname}
if db.groups.find({'gid': group['gid'], '$or': [{'owners': tid},
{'members': tid}]}).count() != 0:
return {'status': 2, 'message': "You are already in '%s'." % gname}
db.groups.update({'gid': group['gid']}, {'$push': {'members': tid}})
return {'status': 1, 'message': "Success! You have been added to '%s'." % gname}
def leave_group(tid, gid):
"""Removes the current team from a group"""
if gid is None:
return {'status': 0, 'message': "No group id passed."}
if db.groups.find_one({'gid': gid}) is None:
return {'status': 0, 'message': "Internal error, group not found."}
db.groups.update({'gid': gid}, {'$pull': {'owners': tid}})
db.groups.update({'gid': gid}, {'$pull': {'members': tid}})
return {'status': 1, 'message': "You have successfully been removed from the group."}
| 43.868421 | 119 | 0.595081 | __author__ = "Collin Petty"
__copyright__ = "Carnegie Mellon University"
__license__ = "MIT"
__maintainer__ = ["Collin Petty", "Peter Chapman"]
__credits__ = ["David Brumely", "Collin Petty", "Peter Chapman", "Tyler Nighswander", "Garrett Barboza"]
__email__ = ["[email protected]", "[email protected]"]
__status__ = "Production"
from common import db
import common
def get_group_membership(tid):
groups = list()
owners = set()
for g in list(db.groups.find({'owners': tid}, {'name': 1, 'gid': 1})):
groups.append({'name': g['name'],
'gid': g['gid'],
'owner': True})
owners.add(g['gid'])
groups += filter(lambda g: g['gid'] not in owners,
({'name': g['name'],
'gid': g['gid'],
'owner': False} for g in list(db.groups.find({'members': tid}, {'name': 1, 'gid': 1}))))
return groups
def create_group(tid, gname):
if gname == '':
return {'status': 0, 'message': "The group name cannot be empty!"}
if db.groups.find_one({'name': gname}) is not None:
return {'status': 2, 'message': "This group exists, would you like to join it?"}
db.groups.insert({"name": gname, "owners": [tid], "members": [tid], "gid": common.token()})
return {'status': 1, 'message': "Successfully created the group"}
def join_group(tid, gname):
if gname == '':
return {'status': 0, 'message': "The group name cannot be empty!"}
group = db.groups.find_one({'name': gname})
if group is None:
return {'status': 3, 'message': "Cannot find group '%s', create it?" % gname}
if db.groups.find({'gid': group['gid'], '$or': [{'owners': tid},
{'members': tid}]}).count() != 0:
return {'status': 2, 'message': "You are already in '%s'." % gname}
db.groups.update({'gid': group['gid']}, {'$push': {'members': tid}})
return {'status': 1, 'message': "Success! You have been added to '%s'." % gname}
def leave_group(tid, gid):
if gid is None:
return {'status': 0, 'message': "No group id passed."}
if db.groups.find_one({'gid': gid}) is None:
return {'status': 0, 'message': "Internal error, group not found."}
db.groups.update({'gid': gid}, {'$pull': {'owners': tid}})
db.groups.update({'gid': gid}, {'$pull': {'members': tid}})
return {'status': 1, 'message': "You have successfully been removed from the group."}
| true | true |
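A plausible end-to-end exercise of the handlers above, assuming a configured `db` and a team token; the status codes follow the messages in the source (1 = success, 2 = already exists or already a member, 3 = not found):

from api import group

tid = "team-token-123"  # hypothetical team id from the platform's team store

assert group.create_group(tid, "pwners")["status"] == 1    # created; tid is owner and member
assert group.join_group(tid, "pwners")["status"] == 2      # already in the group
assert group.join_group(tid, "no-such")["status"] == 3     # prompts creation instead

gid = group.get_group_membership(tid)[0]["gid"]            # owned groups come first
assert group.leave_group(tid, gid)["status"] == 1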
1c4a353f8eb312454c2ccc0840d0e804a826813f | 1,475 | py | Python | tests/stores/test_ssh_tunnel.py | materialsproject/maggflow | 9f8d7a0865ec13212a3fd00d5edebd3cb7b40e7d | [
"BSD-3-Clause-LBNL"
] | 15 | 2017-06-15T16:35:23.000Z | 2022-03-05T09:57:02.000Z | tests/stores/test_ssh_tunnel.py | materialsproject/maggflow | 9f8d7a0865ec13212a3fd00d5edebd3cb7b40e7d | [
"BSD-3-Clause-LBNL"
] | 573 | 2017-06-14T15:54:27.000Z | 2022-03-31T23:20:55.000Z | tests/stores/test_ssh_tunnel.py | rkingsbury/maggma | 53def068df1cb410bfe91e7045903997813e173a | [
"BSD-3-Clause-LBNL"
] | 28 | 2017-06-14T20:50:26.000Z | 2022-03-04T16:56:40.000Z | import paramiko
import pymongo
import pytest
from monty.serialization import dumpfn, loadfn
from paramiko.ssh_exception import (
AuthenticationException,
NoValidConnectionsError,
SSHException,
)
from maggma.stores.mongolike import MongoStore, SSHTunnel
@pytest.fixture
def ssh_server_available():
"""
Fixture to determine if an SSH server is available
to test the SSH tunnel
"""
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
client.connect("127.0.0.1", 22)
client.close()
except (AuthenticationException, NoValidConnectionsError, SSHException):
pytest.skip("No SSH server to test tunnel against")
def test_mongostore_connect_via_ssh(ssh_server_available):
server = SSHTunnel("127.0.0.1:22", "127.0.0.1:27017")
mongostore = MongoStore("maggma_test", "test", ssh_tunnel=server)
mongostore.connect()
assert isinstance(mongostore._collection, pymongo.collection.Collection)
mongostore.remove_docs({})
assert mongostore.count() == 0
mongostore.update([{"task_id": 0}])
assert mongostore.count() == 1
mongostore.remove_docs({})
mongostore.close()
def test_serialization(tmpdir, ssh_server_available):
tunnel = SSHTunnel("127.0.0.1:22", "127.0.0.1:27017")
dumpfn(tunnel, tmpdir / "tunnel.json")
new_tunnel = loadfn(tmpdir / "tunnel.json")
assert isinstance(new_tunnel, SSHTunnel)
 | 27.314815 | 76 | 0.721356 | import paramiko
import pymongo
import pytest
from monty.serialization import dumpfn, loadfn
from paramiko.ssh_exception import (
AuthenticationException,
NoValidConnectionsError,
SSHException,
)
from maggma.stores.mongolike import MongoStore, SSHTunnel
@pytest.fixture
def ssh_server_available():
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
client.connect("127.0.0.1", 22)
client.close()
except (AuthenticationException, NoValidConnectionsError, SSHException):
pytest.skip("No SSH server to test tunnel against")
def test_mongostore_connect_via_ssh(ssh_server_available):
server = SSHTunnel("127.0.0.1:22", "127.0.0.1:27017")
mongostore = MongoStore("maggma_test", "test", ssh_tunnel=server)
mongostore.connect()
assert isinstance(mongostore._collection, pymongo.collection.Collection)
mongostore.remove_docs({})
assert mongostore.count() == 0
mongostore.update([{"task_id": 0}])
assert mongostore.count() == 1
mongostore.remove_docs({})
mongostore.close()
def test_serialization(tmpdir, ssh_server_available):
tunnel = SSHTunnel("127.0.0.1:22", "127.0.0.1:27017")
dumpfn(tunnel, tmpdir / "tunnel.json")
new_tunnel = loadfn(tmpdir / "tunnel.json")
assert isinstance(new_tunnel, SSHTunnel)
| true | true |
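The `ssh_server_available` fixture above encodes a common pattern: probe an external dependency, then pytest.skip when it is unreachable. The same probe can be factored into a plain helper so other tests can reuse it (a sketch; the defaults mirror the fixture):

import paramiko
import pytest
from paramiko.ssh_exception import (
    AuthenticationException,
    NoValidConnectionsError,
    SSHException,
)

def require_ssh(host="127.0.0.1", port=22):
    """Skip the calling test unless an SSH server answers on host:port."""
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        client.connect(host, port)
        client.close()
    except (AuthenticationException, NoValidConnectionsError, SSHException):
        pytest.skip("no SSH server reachable at %s:%s" % (host, port))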
1c4a35ff9382304504947b41d126b9b2e6bc6f14 | 4,332 | py | Python | ray_exp.py | vakker/spg-experiments | 4824861c3ac66387078023c14ead47ba9a9e6c72 | [
"MIT"
] | null | null | null | ray_exp.py | vakker/spg-experiments | 4824861c3ac66387078023c14ead47ba9a9e6c72 | [
"MIT"
] | null | null | null | ray_exp.py | vakker/spg-experiments | 4824861c3ac66387078023c14ead47ba9a9e6c72 | [
"MIT"
] | 2 | 2021-02-15T11:12:27.000Z | 2021-04-20T17:15:10.000Z | import argparse
from datetime import datetime
import ray
from ray import tune
from ray.tune import CLIReporter
from ray.tune.suggest.variant_generator import grid_search
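# NOTE: the next import is (presumably) needed for its side effect of registering
# the custom "vision-1d" model used in the config below with RLlib's ModelCatalog.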
from spg_experiments import models
from spg_experiments.gym_env import PlaygroundEnv
def exp_name(prefix):
return prefix + '.' + datetime.now().strftime("%Y-%m-%d.%H:%M:%S")
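# A dict that claims to have no keys: handed to CLIReporter as parameter_columns
# below so the heavily grid-searched config does not flood the progress table.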
class E(dict):
def keys(self):
return []
def trial_str_creator(trial):
params = {
k.split('/')[-1]: p[-1] if isinstance(p, list) else str(p)
for k, p in trial.evaluated_params.items()
}
name = '-'.join([f'{k}:{p}' for k, p in params.items()])
return f'trial-{name}'
def main(args):
ray.init(local_mode=args.local)
config = {
"num_workers": args.num_workers, # parallelism
"num_envs_per_worker": 2,
"num_cpus_per_worker": 0.5,
"evaluation_num_workers": args.num_workers,
# "evaluation_config": {
# },
"evaluation_interval": 10,
"env": PlaygroundEnv,
"output": "logdir",
"env_config": {
"agent_type": "base",
# "index_exp": grid_search([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
"playground_name": grid_search([
["foraging", "candy_collect"],
["foraging", "candy_fireballs"],
['navigation', 'endgoal_cue'],
['sequential', 'door_dispenser_coin'],
]),
"sensors_name": grid_search([
"blind",
"rgb",
"depth",
"rgb_depth",
"rgb_touch",
"rgb_depth_touch",
]),
# "multisteps": grid_search([0, 2, 3, 4])
# "multisteps": 0
},
"num_gpus": 0.5 if args.gpu else 0,
"framework": "torch",
"gamma": grid_search([0.1, 0.2, 0.5, 0.8, 0.99]), # checked
"lr": grid_search([0.001, 0.0001, 0.00001]),
"lambda": 0.95, # checked
# "kl_coeff": 0.5, # ?
"clip_rewards": False,
"clip_param": 0.2, # checked?
"grad_clip": 0.5, # checked
# "vf_clip_param": 10, # checked, it's None in SB, 10 in RLlib
"vf_loss_coeff": 0.0001, # checked
"entropy_coeff": grid_search([0.05, 0.01, 0.005, 0.001]), # checked
"train_batch_size": 128 * 10 * 8, # checked, but check the *4*2
"sgd_minibatch_size": 128, # could be larger
"num_sgd_iter": 4, # checked?
"batch_mode": "truncate_episodes",
"observation_filter": "NoFilter",
"model": {
"custom_model": "vision-1d",
"conv_filters": [
[64, 5, 3],
[64, 3, 2],
[64, 3, 2],
[128, 3, 2],
[128, 3, 2],
# [128, 3, 2],
],
"use_lstm": grid_search([True, False]),
},
}
stop = {"timesteps_total": args.stop_timesteps}
if args.stop_iters:
stop.update({"training_iteration": args.stop_iters})
if args.stop_reward:
stop.update({"episode_reward_mean": args.stop_reward})
name = exp_name('PPO')
reporter = CLIReporter(parameter_columns=E({"_": "_"}))
results = tune.run(
args.run,
config=config,
stop=stop,
local_dir=args.logdir,
checkpoint_at_end=True,
checkpoint_freq=1,
keep_checkpoints_num=2,
trial_name_creator=trial_str_creator,
trial_dirname_creator=trial_str_creator,
progress_reporter=reporter,
name=name,
max_failures=3,
verbose=1)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--logdir", type=str, default="logs")
parser.add_argument("--run", type=str, default="PPO")
parser.add_argument("--as-test", action="store_true")
parser.add_argument("--local", action="store_true")
parser.add_argument("--gpu", action="store_true")
parser.add_argument("--monitor", action="store_true")
parser.add_argument("--num-workers", type=int, default=5)
parser.add_argument("--stop-timesteps", type=int, default=1000000)
parser.add_argument("--stop-iters", type=int)
parser.add_argument("--stop-reward", type=float)
args = parser.parse_args()
main(args)
| 32.328358 | 76 | 0.557248 | import argparse
from datetime import datetime
import ray
from ray import tune
from ray.tune import CLIReporter
from ray.tune.suggest.variant_generator import grid_search
from spg_experiments import models
from spg_experiments.gym_env import PlaygroundEnv
def exp_name(prefix):
return prefix + '.' + datetime.now().strftime("%Y-%m-%d.%H:%M:%S")
class E(dict):
def keys(self):
return []
def trial_str_creator(trial):
params = {
k.split('/')[-1]: p[-1] if isinstance(p, list) else str(p)
for k, p in trial.evaluated_params.items()
}
name = '-'.join([f'{k}:{p}' for k, p in params.items()])
return f'trial-{name}'
def main(args):
ray.init(local_mode=args.local)
    config = {
        "num_workers": args.num_workers,
        "num_envs_per_worker": 2,
"num_cpus_per_worker": 0.5,
"evaluation_num_workers": args.num_workers,
"evaluation_interval": 10,
"env": PlaygroundEnv,
"output": "logdir",
"env_config": {
"agent_type": "base",
"playground_name": grid_search([
["foraging", "candy_collect"],
["foraging", "candy_fireballs"],
['navigation', 'endgoal_cue'],
['sequential', 'door_dispenser_coin'],
]),
"sensors_name": grid_search([
"blind",
"rgb",
"depth",
"rgb_depth",
"rgb_touch",
"rgb_depth_touch",
]),
        },
        "num_gpus": 0.5 if args.gpu else 0,
        "framework": "torch",
        "gamma": grid_search([0.1, 0.2, 0.5, 0.8, 0.99]),
        "lr": grid_search([0.001, 0.0001, 0.00001]),
        "lambda": 0.95,
        "clip_rewards": False,
        "clip_param": 0.2,
        "grad_clip": 0.5,
        "vf_loss_coeff": 0.0001,  # checked
"entropy_coeff": grid_search([0.05, 0.01, 0.005, 0.001]), # checked
"train_batch_size": 128 * 10 * 8, # checked, but check the *4*2
"sgd_minibatch_size": 128, # could be larger
"num_sgd_iter": 4, # checked?
"batch_mode": "truncate_episodes",
"observation_filter": "NoFilter",
"model": {
"custom_model": "vision-1d",
"conv_filters": [
[64, 5, 3],
[64, 3, 2],
[64, 3, 2],
[128, 3, 2],
[128, 3, 2],
# [128, 3, 2],
],
"use_lstm": grid_search([True, False]),
},
}
stop = {"timesteps_total": args.stop_timesteps}
if args.stop_iters:
stop.update({"training_iteration": args.stop_iters})
if args.stop_reward:
stop.update({"episode_reward_mean": args.stop_reward})
name = exp_name('PPO')
reporter = CLIReporter(parameter_columns=E({"_": "_"}))
results = tune.run(
args.run,
config=config,
stop=stop,
local_dir=args.logdir,
checkpoint_at_end=True,
checkpoint_freq=1,
keep_checkpoints_num=2,
trial_name_creator=trial_str_creator,
trial_dirname_creator=trial_str_creator,
progress_reporter=reporter,
name=name,
max_failures=3,
verbose=1)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--logdir", type=str, default="logs")
parser.add_argument("--run", type=str, default="PPO")
parser.add_argument("--as-test", action="store_true")
parser.add_argument("--local", action="store_true")
parser.add_argument("--gpu", action="store_true")
parser.add_argument("--monitor", action="store_true")
parser.add_argument("--num-workers", type=int, default=5)
parser.add_argument("--stop-timesteps", type=int, default=1000000)
parser.add_argument("--stop-iters", type=int)
parser.add_argument("--stop-reward", type=float)
args = parser.parse_args()
main(args)
| true | true |
1c4a375ec65d60b933b68a9c2cccd74c1d4c9d3c | 2,168 | py | Python | test_scripts/B3_VanillaBSM_tests.py | Abhi1588/PricingToolBox | 2c0bded1a6374c481113c972c819101df043d9f2 | [
"MIT"
] | null | null | null | test_scripts/B3_VanillaBSM_tests.py | Abhi1588/PricingToolBox | 2c0bded1a6374c481113c972c819101df043d9f2 | [
"MIT"
] | null | null | null | test_scripts/B3_VanillaBSM_tests.py | Abhi1588/PricingToolBox | 2c0bded1a6374c481113c972c819101df043d9f2 | [
"MIT"
] | null | null | null |
import math
import matplotlib.pyplot as plt
import numpy as np
# NOTE: the pricing helpers used below (europeanCallOptionPrice,
# europeanPutOptionPrice, forwardPrice, digitalCall, digitalPut,
# zerocouponbond) are assumed to come from this package's BSM pricing
# module; the original file does not show where they are imported from.
def main():
spot = 100
strike = 100
maturity = 1
rate = 0.02
dividend = 0
vol = .05
# Put Call Parity
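    # Parity identity (no dividends): C - P = S - K * exp(-r * T),
    # so call - put should match the forward value printed below.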
put = europeanPutOptionPrice(spot,strike,maturity,rate,dividend,vol)
call = europeanCallOptionPrice(spot,strike,maturity,rate,dividend,vol)
fwd = forwardPrice(spot,strike,maturity,rate,dividend)
print("Put Call Parity \nCall :{} - Put :{} = {} \nForward: {}".format(call,put,call-put,fwd))
print("+"*20)
#Price of call is monotonically decreasing in strike
lStrike = []
lcallPrice = []
for i in range(strike-90, strike+110, 10):
lStrike.append(i)
lcallPrice.append(europeanCallOptionPrice(spot,i,maturity,rate,dividend,vol))
fig, ax = plt.subplots()
ax.plot(lStrike, lcallPrice, label = "call price")
ax.set_xlabel('strikes') # Add an x-label to the axes.
ax.set_ylabel('option price') # Add a y-label to the axes.
ax.set_title("Call Option Price vs Strike") # Add a title to the axes.
ax.legend()
plt.show()
print("+"*20)
#Price of call is between S and S - k e^(-rt)
f = spot - strike*math.exp(-rate*maturity)
if f < call and call < spot:
print("True")
print("+"*20)
#Price of call is monotonically increasign in vol
lvol = []
lcallPrice = []
for i in np.arange(vol*.5, vol*1.5, 0.005):
lvol.append(i)
lcallPrice.append(europeanCallOptionPrice(spot,strike,maturity,rate,dividend,i))
fig, ax = plt.subplots()
ax.plot(lvol, lcallPrice, label = "call price")
ax.set_xlabel('vol') # Add an x-label to the axes.
ax.set_ylabel('option price') # Add a y-label to the axes.
ax.set_title("Call Option Price vs vol") # Add a title to the axes.
ax.legend()
plt.show()
print("+"*20)
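    # A cash-or-nothing digital call plus a digital put at the same strike pays
    # 1 at expiry, so together they should price to a zero-coupon bond; the
    # tight (K, K + 1) call spread below approximates the digital call payoff.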
Dcall = digitalCall(spot,strike,maturity,rate,dividend,vol)
Dput = digitalPut(spot,strike,maturity,rate,dividend,vol)
zcb = zerocouponbond(rate,maturity)
call_short = europeanCallOptionPrice(spot,strike+1,maturity,rate,dividend,vol)
spread = call - call_short
print(Dcall+Dput, "ZCB : {}".format(zcb))
print(Dcall,"Spread: {}".format(spread))
| 30.971429 | 98 | 0.642528 |
import math
import matplotlib.pyplot as plt
import numpy as np
def main():
spot = 100
strike = 100
maturity = 1
rate = 0.02
dividend = 0
vol = .05
put = europeanPutOptionPrice(spot,strike,maturity,rate,dividend,vol)
call = europeanCallOptionPrice(spot,strike,maturity,rate,dividend,vol)
fwd = forwardPrice(spot,strike,maturity,rate,dividend)
print("Put Call Parity \nCall :{} - Put :{} = {} \nForward: {}".format(call,put,call-put,fwd))
print("+"*20)
lStrike = []
lcallPrice = []
for i in range(strike-90, strike+110, 10):
lStrike.append(i)
lcallPrice.append(europeanCallOptionPrice(spot,i,maturity,rate,dividend,vol))
fig, ax = plt.subplots()
ax.plot(lStrike, lcallPrice, label = "call price")
    ax.set_xlabel('strikes')
    ax.set_ylabel('option price')
    ax.set_title("Call Option Price vs Strike")
    ax.legend()
plt.show()
print("+"*20)
f = spot - strike*math.exp(-rate*maturity)
if f < call and call < spot:
print("True")
print("+"*20)
lvol = []
lcallPrice = []
for i in np.arange(vol*.5, vol*1.5, 0.005):
lvol.append(i)
lcallPrice.append(europeanCallOptionPrice(spot,strike,maturity,rate,dividend,i))
fig, ax = plt.subplots()
ax.plot(lvol, lcallPrice, label = "call price")
    ax.set_xlabel('vol')
    ax.set_ylabel('option price')
    ax.set_title("Call Option Price vs vol")
    ax.legend()
plt.show()
print("+"*20)
Dcall = digitalCall(spot,strike,maturity,rate,dividend,vol)
Dput = digitalPut(spot,strike,maturity,rate,dividend,vol)
zcb = zerocouponbond(rate,maturity)
call_short = europeanCallOptionPrice(spot,strike+1,maturity,rate,dividend,vol)
spread = call - call_short
print(Dcall+Dput, "ZCB : {}".format(zcb))
print(Dcall,"Spread: {}".format(spread))
| true | true |
1c4a3ad80df40d707d4b56018ddf73806353244b | 1,790 | py | Python | config/settings/test.py | iamjdcollins/districtwebsite | 89e2aea47ca3d221665bc23586a4374421be5800 | [
"MIT"
] | null | null | null | config/settings/test.py | iamjdcollins/districtwebsite | 89e2aea47ca3d221665bc23586a4374421be5800 | [
"MIT"
] | null | null | null | config/settings/test.py | iamjdcollins/districtwebsite | 89e2aea47ca3d221665bc23586a4374421be5800 | [
"MIT"
] | null | null | null | from .base import *
ENVIRONMENT_MODE = 'test'
ALLOWED_HOSTS += [
'backman-test.slcschools.org',
'beaconheights-test.slcschools.org',
'bennion-test.slcschools.org',
'bonneville-test.slcschools.org',
'bryant-test.slcschools.org',
'clayton-test.slcschools.org',
'dilworth-test.slcschools.org',
'east-test.slcschools.org',
'edison-test.slcschools.org',
'emerson-test.slcschools.org',
'ensign-test.slcschools.org',
'escalante-test.slcschools.org',
'franklin-test.slcschools.org',
'glendale-test.slcschools.org',
'hawthorne-test.slcschools.org',
'highland-test.slcschools.org',
'highlandpark-test.slcschools.org',
'hillside-test.slcschools.org',
'horizonte-test.slcschools.org',
'horizonte-test.slcschools.org',
'indianhills-test.slcschools.org',
'innovations-test.slcschools.org',
'innovations-test.slcschools.org',
'liberty-test.slcschools.org',
'maryjackson-test.slcschools.org',
'meadowlark-test.slcschools.org',
'mountainview-test.slcschools.org',
'newman-test.slcschools.org',
'nibleypark-test.slcschools.org',
'northstar-test.slcschools.org',
'northwest-test.slcschools.org',
'parkview-test.slcschools.org',
'riley-test.slcschools.org',
'rosepark-test.slcschools.org',
'uintah-test.slcschools.org',
'wasatch-test.slcschools.org',
'washington-test.slcschools.org',
'websites-test.slcschools.org',
'west-test.slcschools.org',
'whittier-test.slcschools.org',
'www-test.ocslc.org',
'www-test.saltlakespa.org',
'www-test.slcschools.org',
'www-test.slcse.org',
]
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = '/tmp'
STATIC_URL = 'https://websites-test.slcschools.org/static/'
| 31.964286 | 66 | 0.698324 | from .base import *
ENVIRONMENT_MODE = 'test'
ALLOWED_HOSTS += [
'backman-test.slcschools.org',
'beaconheights-test.slcschools.org',
'bennion-test.slcschools.org',
'bonneville-test.slcschools.org',
'bryant-test.slcschools.org',
'clayton-test.slcschools.org',
'dilworth-test.slcschools.org',
'east-test.slcschools.org',
'edison-test.slcschools.org',
'emerson-test.slcschools.org',
'ensign-test.slcschools.org',
'escalante-test.slcschools.org',
'franklin-test.slcschools.org',
'glendale-test.slcschools.org',
'hawthorne-test.slcschools.org',
'highland-test.slcschools.org',
'highlandpark-test.slcschools.org',
'hillside-test.slcschools.org',
'horizonte-test.slcschools.org',
'horizonte-test.slcschools.org',
'indianhills-test.slcschools.org',
'innovations-test.slcschools.org',
'innovations-test.slcschools.org',
'liberty-test.slcschools.org',
'maryjackson-test.slcschools.org',
'meadowlark-test.slcschools.org',
'mountainview-test.slcschools.org',
'newman-test.slcschools.org',
'nibleypark-test.slcschools.org',
'northstar-test.slcschools.org',
'northwest-test.slcschools.org',
'parkview-test.slcschools.org',
'riley-test.slcschools.org',
'rosepark-test.slcschools.org',
'uintah-test.slcschools.org',
'wasatch-test.slcschools.org',
'washington-test.slcschools.org',
'websites-test.slcschools.org',
'west-test.slcschools.org',
'whittier-test.slcschools.org',
'www-test.ocslc.org',
'www-test.saltlakespa.org',
'www-test.slcschools.org',
'www-test.slcse.org',
]
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = '/tmp'
STATIC_URL = 'https://websites-test.slcschools.org/static/'
| true | true |
1c4a3b9240115b8c64d64f4014238d4942020122 | 3,525 | py | Python | textbox/model/Seq2Seq/t5.py | StevenTang1998/TextBox | acd8298c7e6618384d585146f799d02cc475520c | [
"MIT"
] | 347 | 2021-01-09T07:55:55.000Z | 2022-03-27T00:46:36.000Z | textbox/model/Seq2Seq/t5.py | StevenTang1998/TextBox | acd8298c7e6618384d585146f799d02cc475520c | [
"MIT"
] | 18 | 2021-01-12T07:37:06.000Z | 2022-01-11T02:26:49.000Z | textbox/model/Seq2Seq/t5.py | StevenTang1998/TextBox | acd8298c7e6618384d585146f799d02cc475520c | [
"MIT"
] | 67 | 2021-01-09T07:23:52.000Z | 2022-03-27T12:02:12.000Z | # @Time : 2021/3/15
# @Author : Zhuohao Yu
# @Email : [email protected]
r"""
T5
################################################
Reference:
    Raffel et al. "Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer" at JMLR 2020.
"""
import torch
import torch.nn as nn
import torch.functional as F
from textbox.model.abstract_generator import Seq2SeqGenerator
from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config
class T5(Seq2SeqGenerator):
def __init__(self, config, dataset):
super(T5, self).__init__(config, dataset)
self.pretrained_model_path = config['pretrained_model_path']
self.tokenizer = T5Tokenizer.from_pretrained(self.pretrained_model_path)
self.configuration = T5Config.from_pretrained(self.pretrained_model_path)
self.model = T5ForConditionalGeneration.from_pretrained(self.pretrained_model_path, config=self.configuration)
self.padding_token_idx = self.tokenizer.pad_token_id
self.loss = nn.CrossEntropyLoss(ignore_index=self.padding_token_idx, reduction='none')
if config['task_type'] == "summarization":
self.t5_task_text = "summarize: "
elif config['task_type'] == "translation":
self.t5_task_text = "translate German to English: "
else:
raise NotImplementedError("Only summarization and translation are supported.")
def generate(self, batch_data, eval_data):
source_text = batch_data['source_text']
input_ids, attn_masks = self.tokenize_text(source_text)
sample_outputs = self.model.generate(
input_ids, attention_mask=attn_masks, num_beams=5, max_length=self.target_max_length, early_stopping=True
)
generated_text = self.tokenizer.batch_decode(sample_outputs, skip_special_tokens=True)
generate_corpus = [text.lower().split() for text in generated_text]
return generate_corpus
def tokenize_text(self, text, is_target=False):
input_ids = []
attn_masks = []
texts = [(self.t5_task_text if not is_target else '') + ' '.join(t) for t in text]
encoding_dict = self.tokenizer(
texts, max_length=self.source_max_length, padding=True, truncation=True, return_tensors="pt"
)
input_ids = encoding_dict['input_ids'].to(self.device)
attn_masks = encoding_dict['attention_mask'].to(self.device)
return input_ids, attn_masks
def forward(self, corpus, epoch_idx=-1):
source_text = corpus['source_text']
target_text = corpus['target_text']
input_ids, attn_masks = self.tokenize_text(source_text)
target_ids, decoder_attn_masks = self.tokenize_text(target_text, is_target=True)
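        # Teacher forcing: the decoder consumes the target shifted right and
        # is trained to predict the next token at every position.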
decoder_input_ids = target_ids[:, :-1].contiguous()
decoder_attn_masks = decoder_attn_masks[:, :-1].contiguous()
decoder_target_ids = target_ids[:, 1:].contiguous()
outputs = self.model(
input_ids,
attention_mask=attn_masks,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attn_masks,
use_cache=False
)
token_logits = outputs.logits
loss = self.loss(token_logits.view(-1, token_logits.size(-1)), decoder_target_ids.view(-1))
loss = loss.reshape_as(decoder_target_ids)
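        # Sum token losses and normalize by each sequence's non-padding length
        # before averaging over the batch.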
length = (decoder_target_ids != self.padding_token_idx).sum(dim=1).float()
loss = loss.sum(dim=1) / length
return loss.mean()
| 38.736264 | 118 | 0.680284 |
import torch
import torch.nn as nn
import torch.functional as F
from textbox.model.abstract_generator import Seq2SeqGenerator
from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config
class T5(Seq2SeqGenerator):
def __init__(self, config, dataset):
super(T5, self).__init__(config, dataset)
self.pretrained_model_path = config['pretrained_model_path']
self.tokenizer = T5Tokenizer.from_pretrained(self.pretrained_model_path)
self.configuration = T5Config.from_pretrained(self.pretrained_model_path)
self.model = T5ForConditionalGeneration.from_pretrained(self.pretrained_model_path, config=self.configuration)
self.padding_token_idx = self.tokenizer.pad_token_id
self.loss = nn.CrossEntropyLoss(ignore_index=self.padding_token_idx, reduction='none')
if config['task_type'] == "summarization":
self.t5_task_text = "summarize: "
elif config['task_type'] == "translation":
self.t5_task_text = "translate German to English: "
else:
raise NotImplementedError("Only summarization and translation are supported.")
def generate(self, batch_data, eval_data):
source_text = batch_data['source_text']
input_ids, attn_masks = self.tokenize_text(source_text)
sample_outputs = self.model.generate(
input_ids, attention_mask=attn_masks, num_beams=5, max_length=self.target_max_length, early_stopping=True
)
generated_text = self.tokenizer.batch_decode(sample_outputs, skip_special_tokens=True)
generate_corpus = [text.lower().split() for text in generated_text]
return generate_corpus
def tokenize_text(self, text, is_target=False):
input_ids = []
attn_masks = []
texts = [(self.t5_task_text if not is_target else '') + ' '.join(t) for t in text]
encoding_dict = self.tokenizer(
texts, max_length=self.source_max_length, padding=True, truncation=True, return_tensors="pt"
)
input_ids = encoding_dict['input_ids'].to(self.device)
attn_masks = encoding_dict['attention_mask'].to(self.device)
return input_ids, attn_masks
def forward(self, corpus, epoch_idx=-1):
source_text = corpus['source_text']
target_text = corpus['target_text']
input_ids, attn_masks = self.tokenize_text(source_text)
target_ids, decoder_attn_masks = self.tokenize_text(target_text, is_target=True)
decoder_input_ids = target_ids[:, :-1].contiguous()
decoder_attn_masks = decoder_attn_masks[:, :-1].contiguous()
decoder_target_ids = target_ids[:, 1:].contiguous()
outputs = self.model(
input_ids,
attention_mask=attn_masks,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attn_masks,
use_cache=False
)
token_logits = outputs.logits
loss = self.loss(token_logits.view(-1, token_logits.size(-1)), decoder_target_ids.view(-1))
loss = loss.reshape_as(decoder_target_ids)
length = (decoder_target_ids != self.padding_token_idx).sum(dim=1).float()
loss = loss.sum(dim=1) / length
return loss.mean()
| true | true |
1c4a3c1daeaf23bbe73948eea1a5de1332e04d05 | 2,766 | py | Python | examples/complex/karman.py | david-moravec/classy_examples | f57e8b77db6f3e536a5367fb00ec850c6d901333 | [
"MIT"
] | null | null | null | examples/complex/karman.py | david-moravec/classy_examples | f57e8b77db6f3e536a5367fb00ec850c6d901333 | [
"MIT"
] | null | null | null | examples/complex/karman.py | david-moravec/classy_examples | f57e8b77db6f3e536a5367fb00ec850c6d901333 | [
"MIT"
] | null | null | null | from classy_blocks.classes.mesh import Mesh
from classy_blocks.classes.shapes import ExtrudedRing, Box
def get_mesh():
cylinder_diameter = 20e-3 # [m]
ring_thickness = 5e-3 # [m]
# domain size
domain_height = 0.05 # [m] (increase for "proper" simulation)
upstream_length = 0.03 # [m]
downstream_length = 0.05 # [m]
# size to roughly match cells outside ring
cell_size = 0.3*ring_thickness
bl_thickness = 1e-4
c2c_expansion = 1.2 # cell-to-cell expansion ratio
# it's a 2-dimensional case
z = 0.01
mesh = Mesh()
# a layer of cells on the cylinder
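    # d = cos(45 deg): scales a radius to the x/y coordinate of its diagonal
    # point, giving the corner points of the square core blocks.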
d = 2**0.5/2
ring_point = d*cylinder_diameter/2
outer_point = d*(cylinder_diameter/2 + ring_thickness)
wall_ring = ExtrudedRing(
[0, 0, 0],
[0, 0, z],
[ring_point, ring_point, 0],
cylinder_diameter/2 + ring_thickness
)
wall_ring.chop_axial(count=1)
wall_ring.chop_tangential(start_size=cell_size)
wall_ring.chop_radial(start_size=bl_thickness, c2c_expansion=c2c_expansion)
wall_ring.set_inner_patch('cylinder')
mesh.add(wall_ring)
# boxes that fill up the whole domain
def make_box(p1, p2, size_axes, patches):
box = Box(
[p1[0], p1[1], 0],
[p2[0], p2[1], z])
for axis in size_axes:
box.chop(axis, start_size=cell_size)
for side, name in patches.items():
box.set_patch(side, name)
mesh.add(box)
# top 3 boxes
make_box(
[-upstream_length, outer_point],
[-outer_point, domain_height/2],
[0, 1],
{'back': 'upper_wall', 'left': 'inlet'})
make_box(
[-outer_point, outer_point],
[outer_point, domain_height/2],
[],
{'back': 'upper_wall'})
make_box(
[outer_point, outer_point],
[downstream_length, domain_height/2],
[0, 1],
{'back': 'upper_wall', 'right': 'outlet'})
# left and right of the cylinder
make_box(
[-upstream_length, -outer_point],
[-outer_point, outer_point],
[],
{'left': 'inlet'})
make_box(
[outer_point, -outer_point],
[downstream_length, outer_point],
[],
{'right': 'outlet'})
# bottom 3 boxes
make_box(
[-upstream_length, -domain_height/2],
[-outer_point, -outer_point],
[0, 1],
{'front': 'lower_wall', 'left': 'inlet'})
make_box(
[-outer_point, -domain_height/2],
[outer_point, -outer_point],
[],
{'front': 'lower_wall'})
make_box(
[outer_point, -domain_height/2],
[downstream_length, -outer_point],
[0, 1],
{'front': 'lower_wall', 'right': 'outlet'})
return mesh | 27.117647 | 79 | 0.58026 | from classy_blocks.classes.mesh import Mesh
from classy_blocks.classes.shapes import ExtrudedRing, Box
def get_mesh():
    cylinder_diameter = 20e-3
    ring_thickness = 5e-3
    domain_height = 0.05
    upstream_length = 0.03
    downstream_length = 0.05
cell_size = 0.3*ring_thickness
bl_thickness = 1e-4
c2c_expansion = 1.2
z = 0.01
mesh = Mesh()
# a layer of cells on the cylinder
d = 2**0.5/2
ring_point = d*cylinder_diameter/2
outer_point = d*(cylinder_diameter/2 + ring_thickness)
wall_ring = ExtrudedRing(
[0, 0, 0],
[0, 0, z],
[ring_point, ring_point, 0],
cylinder_diameter/2 + ring_thickness
)
wall_ring.chop_axial(count=1)
wall_ring.chop_tangential(start_size=cell_size)
wall_ring.chop_radial(start_size=bl_thickness, c2c_expansion=c2c_expansion)
wall_ring.set_inner_patch('cylinder')
mesh.add(wall_ring)
# boxes that fill up the whole domain
def make_box(p1, p2, size_axes, patches):
box = Box(
[p1[0], p1[1], 0],
[p2[0], p2[1], z])
for axis in size_axes:
box.chop(axis, start_size=cell_size)
for side, name in patches.items():
box.set_patch(side, name)
mesh.add(box)
# top 3 boxes
make_box(
[-upstream_length, outer_point],
[-outer_point, domain_height/2],
[0, 1],
{'back': 'upper_wall', 'left': 'inlet'})
make_box(
[-outer_point, outer_point],
[outer_point, domain_height/2],
[],
{'back': 'upper_wall'})
make_box(
[outer_point, outer_point],
[downstream_length, domain_height/2],
[0, 1],
{'back': 'upper_wall', 'right': 'outlet'})
# left and right of the cylinder
make_box(
[-upstream_length, -outer_point],
[-outer_point, outer_point],
[],
{'left': 'inlet'})
make_box(
[outer_point, -outer_point],
[downstream_length, outer_point],
[],
{'right': 'outlet'})
# bottom 3 boxes
make_box(
[-upstream_length, -domain_height/2],
[-outer_point, -outer_point],
[0, 1],
{'front': 'lower_wall', 'left': 'inlet'})
make_box(
[-outer_point, -domain_height/2],
[outer_point, -outer_point],
[],
{'front': 'lower_wall'})
make_box(
[outer_point, -domain_height/2],
[downstream_length, -outer_point],
[0, 1],
{'front': 'lower_wall', 'right': 'outlet'})
return mesh | true | true |
1c4a3cc7dcf16ecf0590cb9b2204ae30f0f7f58c | 32,215 | py | Python | arcade/tilemap/tilemap.py | EnlNovius/arcade | 020d3aafecb6c202dd76cfdf1dbd576117a608c2 | [
"MIT"
] | null | null | null | arcade/tilemap/tilemap.py | EnlNovius/arcade | 020d3aafecb6c202dd76cfdf1dbd576117a608c2 | [
"MIT"
] | null | null | null | arcade/tilemap/tilemap.py | EnlNovius/arcade | 020d3aafecb6c202dd76cfdf1dbd576117a608c2 | [
"MIT"
] | null | null | null | """
This module provides functionality to load in JSON map files from
the Tiled Map Editor. This is achieved using the pytiled-parser
library.
For more info on Tiled see: https://www.mapeditor.org/
For more info on pytiled-parser see: https://github.com/Beefy-Swain/pytiled_parser
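A minimal usage sketch (``my_map.json`` and the layer name ``Platforms`` are
placeholders for your own assets, not files shipped with this module)::

    tile_map = load_tilemap("my_map.json", scaling=2.0)
    platforms = tile_map.sprite_lists["Platforms"]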
"""
import copy
import math
import os
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, List, Optional, OrderedDict, Tuple, Union, cast
import pytiled_parser
import pytiled_parser.tiled_object
from arcade import (
AnimatedTimeBasedSprite,
AnimationKeyframe,
Sprite,
SpriteList,
load_texture,
)
from arcade.arcade_types import Point, TiledObject
from arcade.resources import resolve_resource_path
_FLIPPED_HORIZONTALLY_FLAG = 0x80000000
_FLIPPED_VERTICALLY_FLAG = 0x40000000
_FLIPPED_DIAGONALLY_FLAG = 0x20000000
def _get_image_info_from_tileset(tile: pytiled_parser.Tile):
image_x = 0
image_y = 0
if tile.tileset.image is not None:
margin = tile.tileset.margin or 0
spacing = tile.tileset.spacing or 0
row = tile.id // tile.tileset.columns
image_y = margin + row * (tile.tileset.tile_height + spacing)
col = tile.id % tile.tileset.columns
image_x = margin + col * (tile.tileset.tile_width + spacing)
if tile.tileset.image:
width = tile.tileset.tile_width
height = tile.tileset.tile_height
else:
width = tile.image_width
height = tile.image_height
return image_x, image_y, width, height
def _get_image_source(
tile: pytiled_parser.Tile,
map_directory: Optional[str],
) -> Optional[Path]:
image_file = None
if tile.image:
image_file = tile.image
elif tile.tileset.image:
image_file = tile.tileset.image
if not image_file:
        print(
            f"Warning for tile {tile.id}: no image source listed for either "
            f"the individual tile or its tileset."
        )
return None
if os.path.exists(image_file):
return image_file
if map_directory:
try2 = Path(map_directory, image_file)
if os.path.exists(try2):
return try2
print(f"Warning, can't find image {image_file} for tile {tile.id}")
return None
class TileMap:
"""
Class that represents a fully parsed and loaded map from Tiled.
For examples on how to use this class, see:
https://arcade.academy/examples/index.html#using-tiled-map-editor-to-create-maps
Attributes:
:tiled_map: The pytiled-parser map object. This can be useful for implementing features
that aren't supported by this class by accessing the raw map data directly.
:width: The width of the map in tiles. This is the number of tiles, not pixels.
:height: The height of the map in tiles. This is the number of tiles, not pixels.
:tile_width: The width in pixels of each tile.
:tile_height: The height in pixels of each tile.
:background_color: The background color of the map.
:scaling: A global scaling value to be applied to all Sprites in the map.
:sprite_lists: A dictionary mapping SpriteLists to their layer names. This is used
for all tile layers of the map.
:object_lists: A dictionary mapping TiledObjects to their layer names. This is used
for all object layers of the map.
"""
def __init__(
self,
map_file: Union[str, Path],
scaling: float = 1.0,
layer_options: Optional[Dict[str, Dict[str, Any]]] = None,
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm: str = "Simple",
hit_box_detail: float = 4.5,
) -> None:
"""
Given a .json file, this will read in a Tiled map file, and
initialize a new TileMap object.
The `layer_options` parameter can be used to specify per layer arguments.
The available options for this are:
use_spatial_hash - A boolean to enable spatial hashing on this layer's SpriteList.
scaling - A float providing layer specific Sprite scaling.
hit_box_algorithm - A string for the hit box algorithm to use for the Sprite's in this layer.
hit_box_detail - A float specifying the level of detail for each Sprite's hitbox
For example:
        .. code-block::
layer_options = {
"Platforms": {
"use_spatial_hash": True,
"scaling": 2.5,
},
}
The keys and their values in each layer are passed to the layer processing functions
using the `**` operator on the dictionary.
:param Union[str, Path] map_file: The JSON map file.
:param float scaling: Global scaling to apply to all Sprites.
:param Dict[str, Dict[str, Any]] layer_options: Extra parameters for each layer.
:param Optional[bool] use_spatial_hash: If set to True, this will make moving a sprite
in the SpriteList slower, but it will speed up collision detection
with items in the SpriteList. Great for doing collision detection
with static walls/platforms.
:param str hit_box_algorithm: One of 'None', 'Simple' or 'Detailed'.
:param float hit_box_detail: Float, defaults to 4.5. Used with 'Detailed' to hit box.
"""
# If we should pull from local resources, replace with proper path
map_file = resolve_resource_path(map_file)
# This attribute stores the pytiled-parser map object
self.tiled_map = pytiled_parser.parse_map(map_file)
# Set Map Attributes
self.width = self.tiled_map.map_size.width
self.height = self.tiled_map.map_size.height
self.tile_width = self.tiled_map.tile_size.width
self.tile_height = self.tiled_map.tile_size.height
self.background_color = self.tiled_map.background_color
# Global Layer Defaults
self.scaling = scaling
self.use_spatial_hash = use_spatial_hash
self.hit_box_algorithm = hit_box_algorithm
self.hit_box_detail = hit_box_detail
# Dictionaries to store the SpriteLists for processed layers
self.sprite_lists: OrderedDict[str, SpriteList] = OrderedDict[str, SpriteList]()
self.object_lists: OrderedDict[str, List[TiledObject]] = OrderedDict[
            str, List[TiledObject]
]()
self.properties = self.tiled_map.properties
global_options = {
"scaling": self.scaling,
"use_spatial_hash": self.use_spatial_hash,
"hit_box_algorithm": self.hit_box_algorithm,
"hit_box_detail": self.hit_box_detail,
}
for layer in self.tiled_map.layers:
if (layer.name in self.sprite_lists) or (layer.name in self.object_lists):
raise AttributeError(
f"You have a duplicate layer name '{layer.name}' in your Tiled map. "
"Please use unique names for all layers and tilesets in your map."
)
self._process_layer(layer, global_options, layer_options)
def _process_layer(
self,
layer: pytiled_parser.Layer,
global_options: Dict[str, Any],
layer_options: Optional[Dict[str, Dict[str, Any]]] = None,
) -> None:
processed: Union[
SpriteList, Tuple[Optional[SpriteList], Optional[List[TiledObject]]]
]
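        # Per-layer options (if provided) override the global defaults;
        # the layer is then dispatched on its pytiled_parser type.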
options = global_options
if layer_options:
if layer.name in layer_options:
new_options = {
key: layer_options[layer.name].get(key, global_options[key])
for key in global_options
}
options = new_options
if isinstance(layer, pytiled_parser.TileLayer):
processed = self._process_tile_layer(layer, **options)
self.sprite_lists[layer.name] = processed
elif isinstance(layer, pytiled_parser.ObjectLayer):
processed = self._process_object_layer(layer, **options)
if processed[0]:
sprite_list = processed[0]
if sprite_list:
self.sprite_lists[layer.name] = sprite_list
if processed[1]:
object_list = processed[1]
if object_list:
self.object_lists[layer.name] = object_list
elif isinstance(layer, pytiled_parser.ImageLayer):
processed = self._process_image_layer(layer, **options)
self.sprite_lists[layer.name] = processed
elif isinstance(layer, pytiled_parser.LayerGroup):
for sub_layer in layer.layers:
self._process_layer(sub_layer, global_options, layer_options)
def get_cartesian(
self,
x: float,
y: float,
) -> Tuple[float, float]:
"""
Given a set of coordinates in pixel units, this returns the cartesian coordinates.
This assumes the supplied coordinates are pixel coordinates, and bases the cartesian
grid off of the Map's tile size.
If you have a map with 128x128 pixel Tiles, and you supply coordinates 500, 250 to
this function you'll receive back 3, 2
:param float x: The X Coordinate to convert
:param float y: The Y Coordinate to convert
"""
x = math.floor(x / (self.tile_width * self.scaling))
y = math.floor(y / (self.tile_height * self.scaling))
return x, y
def get_tilemap_layer(self, layer_path: str) -> Optional[pytiled_parser.Layer]:
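        """
        Fetch a layer from this map by a '/'-separated path, e.g.
        ``"my_group/my_layer"`` for layers nested inside layer groups.
        Returns None if no layer matches the path.
        """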
assert isinstance(layer_path, str)
def _get_tilemap_layer(my_path, layers):
layer_name = my_path.pop(0)
for my_layer in layers:
if my_layer.name == layer_name:
if isinstance(my_layer, pytiled_parser.LayerGroup):
if len(my_path) != 0:
return _get_tilemap_layer(my_path, my_layer.layers)
else:
return my_layer
return None
path = layer_path.strip("/").split("/")
layer = _get_tilemap_layer(path, self.tiled_map.layers)
return layer
def _get_tile_by_gid(self, tile_gid: int) -> Optional[pytiled_parser.Tile]:
flipped_diagonally = False
flipped_horizontally = False
flipped_vertically = False
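        # Tiled packs the flip state into the top three bits of the GID;
        # strip each flag and remember it so the sprite can be flipped to match.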
if tile_gid & _FLIPPED_HORIZONTALLY_FLAG:
flipped_horizontally = True
tile_gid -= _FLIPPED_HORIZONTALLY_FLAG
if tile_gid & _FLIPPED_DIAGONALLY_FLAG:
flipped_diagonally = True
tile_gid -= _FLIPPED_DIAGONALLY_FLAG
if tile_gid & _FLIPPED_VERTICALLY_FLAG:
flipped_vertically = True
tile_gid -= _FLIPPED_VERTICALLY_FLAG
for tileset_key, tileset in self.tiled_map.tilesets.items():
if tile_gid < tileset_key:
continue
# No specific tile info, but there is a tile sheet
# print(f"data {tileset_key} {tileset.tiles} {tileset.image} {tileset_key} {tile_gid} {tileset.tile_count}")
if (
tileset.image is not None
and tileset_key <= tile_gid < tileset_key + tileset.tile_count
):
# No specific tile info, but there is a tile sheet
tile_ref = pytiled_parser.Tile(
id=(tile_gid - tileset_key), image=tileset.image
)
elif tileset.tiles is None and tileset.image is not None:
# Not in this tileset, move to the next
continue
else:
if tileset.tiles is None:
return None
tile_ref = tileset.tiles.get(tile_gid - tileset_key)
if tile_ref:
my_tile = copy.copy(tile_ref)
my_tile.tileset = tileset
my_tile.flipped_vertically = flipped_vertically
my_tile.flipped_diagonally = flipped_diagonally
my_tile.flipped_horizontally = flipped_horizontally
return my_tile
print(f"Returning NO tile for {tile_gid}.")
return None
def _get_tile_by_id(
self, tileset: pytiled_parser.Tileset, tile_id: int
) -> Optional[pytiled_parser.Tile]:
for tileset_key, cur_tileset in self.tiled_map.tilesets.items():
if cur_tileset is tileset:
for tile_key, tile in cur_tileset.tiles.items():
if tile_id == tile.id:
return tile
return None
def _create_sprite_from_tile(
self,
tile: pytiled_parser.Tile,
scaling: float = 1.0,
hit_box_algorithm: str = "Simple",
hit_box_detail: float = 4.5,
) -> Sprite:
"""Given a tile from the parser, try and create a Sprite from it."""
# --- Step 1, Find a reference to an image this is going to be based off of
map_source = self.tiled_map.map_file
map_directory = os.path.dirname(map_source)
image_file = _get_image_source(tile, map_directory)
if tile.animation:
my_sprite: Sprite = AnimatedTimeBasedSprite(image_file, scaling)
else:
image_x, image_y, width, height = _get_image_info_from_tileset(tile)
my_sprite = Sprite(
image_file,
scaling,
image_x,
image_y,
width,
height,
flipped_horizontally=tile.flipped_horizontally,
flipped_vertically=tile.flipped_vertically,
flipped_diagonally=tile.flipped_diagonally,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
if tile.properties is not None and len(tile.properties) > 0:
for key, value in tile.properties.items():
my_sprite.properties[key] = value
if tile.type:
my_sprite.properties["type"] = tile.type
if tile.objects is not None:
if not isinstance(tile.objects, pytiled_parser.ObjectLayer):
print("Warning, tile.objects is not an ObjectLayer as expected.")
return my_sprite
if len(tile.objects.tiled_objects) > 1:
if tile.image:
print(
f"Warning, only one hit box supported for tile with image {tile.image}."
)
else:
                    print("Warning, only one hit box supported for tile.")
for hitbox in tile.objects.tiled_objects:
points: List[Point] = []
if isinstance(hitbox, pytiled_parser.tiled_object.Rectangle):
if hitbox.size is None:
                        print(
                            f"Warning: Rectangle hitbox created without a "
                            f"height or width for {tile.image}. Ignoring."
                        )
continue
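                    # Convert Tiled's top-left pixel coordinates into hit box
                    # points centered on the sprite (with the y axis flipped).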
sx = hitbox.coordinates.x - (my_sprite.width / (scaling * 2))
sy = -(hitbox.coordinates.y - (my_sprite.height / (scaling * 2)))
ex = (hitbox.coordinates.x + hitbox.size.width) - (
my_sprite.width / (scaling * 2)
)
ey = -(hitbox.coordinates.y + hitbox.size.height) - (
my_sprite.height / (scaling * 2)
)
points = [[sx, sy], [ex, sy], [ex, ey], [sx, ey]]
elif isinstance(
hitbox, pytiled_parser.tiled_object.Polygon
) or isinstance(hitbox, pytiled_parser.tiled_object.Polyline):
for point in hitbox.points:
adj_x = (
point.x
+ hitbox.coordinates.x
- my_sprite.width / (scaling * 2)
)
adj_y = -(
point.y
+ hitbox.coordinates.y
- my_sprite.height / (scaling * 2)
)
adj_point = [adj_x, adj_y]
points.append(adj_point)
if points[0][0] == points[-1][0] and points[0][1] == points[-1][1]:
points.pop()
elif isinstance(hitbox, pytiled_parser.tiled_object.Ellipse):
if not hitbox.size:
print(
f"Warning: Ellipse hitbox created without a height "
f" or width for {tile.image}. Ignoring."
)
continue
hw = hitbox.size.width / 2
hh = hitbox.size.height / 2
cx = hitbox.coordinates.x + hw
cy = hitbox.coordinates.y + hh
acx = cx - (my_sprite.width / (scaling * 2))
acy = cy - (my_sprite.height / (scaling * 2))
total_steps = 8
angles = [
step / total_steps * 2 * math.pi for step in range(total_steps)
]
for angle in angles:
x = hw * math.cos(angle) + acx
y = -(hh * math.sin(angle) + acy)
points.append([x, y])
else:
print(f"Warning: Hitbox type {type(hitbox)} not supported.")
my_sprite.hit_box = points
if tile.animation:
key_frame_list = []
for frame in tile.animation:
frame_tile = self._get_tile_by_id(tile.tileset, frame.tile_id)
if frame_tile:
image_file = _get_image_source(frame_tile, map_directory)
if frame_tile.image and image_file:
texture = load_texture(image_file)
elif not frame_tile.image and image_file:
# No image for tile, pull from tilesheet
(
image_x,
image_y,
width,
height,
) = _get_image_info_from_tileset(frame_tile)
texture = load_texture(
image_file, image_x, image_y, width, height
)
else:
print(
f"Warning: failed to load image for animation frame for tile {frame_tile.id}"
)
texture = None
key_frame = AnimationKeyframe(
frame.tile_id, frame.duration, texture
)
key_frame_list.append(key_frame)
if len(key_frame_list) == 1:
my_sprite.texture = key_frame.texture
cast(AnimatedTimeBasedSprite, my_sprite).frames = key_frame_list
return my_sprite
def _process_image_layer(
self,
layer: pytiled_parser.ImageLayer,
scaling: float = 1.0,
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm: str = "Simple",
hit_box_detail: float = 4.5,
) -> SpriteList:
sprite_list: SpriteList = SpriteList(use_spatial_hash=use_spatial_hash)
map_source = self.tiled_map.map_file
map_directory = os.path.dirname(map_source)
image_file = layer.image
if not os.path.exists(image_file) and (map_directory):
try2 = Path(map_directory, image_file)
if not os.path.exists(try2):
print(
f"Warning, can't find image {image_file} for Image Layer {layer.name}"
)
image_file = try2
my_texture = load_texture(
image_file,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
if layer.transparent_color:
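            # Rebuild the pixel data, making every pixel that matches the
            # layer's transparent color fully transparent.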
data = my_texture.image.getdata()
target = layer.transparent_color
new_data = []
for item in data:
if (
item[0] == target[0]
and item[1] == target[1]
and item[2] == target[2]
):
new_data.append((255, 255, 255, 0))
else:
new_data.append(item)
my_texture.image.putdata(new_data)
my_sprite = Sprite(
image_file,
scaling,
texture=my_texture,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
if layer.properties:
for key, value in layer.properties.items():
my_sprite.properties[key] = value
if layer.tint_color:
my_sprite.color = layer.tint_color
if layer.opacity:
my_sprite.alpha = int(layer.opacity * 255)
my_sprite.center_x = (layer.offset[0] * scaling) + my_sprite.width / 2
my_sprite.center_y = layer.offset[1]
sprite_list.append(my_sprite)
return sprite_list
def _process_tile_layer(
self,
layer: pytiled_parser.TileLayer,
scaling: float = 1.0,
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm: str = "Simple",
hit_box_detail: float = 4.5,
) -> SpriteList:
sprite_list: SpriteList = SpriteList(use_spatial_hash=use_spatial_hash)
map_array = layer.data
# Loop through the layer and add in the list
for row_index, row in enumerate(map_array):
for column_index, item in enumerate(row):
# Check for an empty tile
if item == 0:
continue
tile = self._get_tile_by_gid(item)
if tile is None:
raise ValueError(
(
f"Couldn't find tile for item {item} in layer "
f"'{layer.name}' in file '{self.tiled_map.map_file}'"
f"at ({column_index}, {row_index})."
)
)
my_sprite = self._create_sprite_from_tile(
tile,
scaling=scaling,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
if my_sprite is None:
print(
f"Warning: Could not create sprite number {item} in layer '{layer.name}' {tile.image}"
)
else:
my_sprite.center_x = (
column_index * (self.tiled_map.tile_size[0] * scaling)
+ my_sprite.width / 2
)
my_sprite.center_y = (
self.tiled_map.map_size.height - row_index - 1
) * (self.tiled_map.tile_size[1] * scaling) + my_sprite.height / 2
# Tint
if layer.tint_color:
my_sprite.color = layer.tint_color
# Opacity
opacity = layer.opacity
if opacity:
my_sprite.alpha = int(opacity * 255)
sprite_list.append(my_sprite)
return sprite_list
def _process_object_layer(
self,
layer: pytiled_parser.ObjectLayer,
scaling: float = 1.0,
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm: str = "Simple",
hit_box_detail: float = 4.5,
) -> Tuple[Optional[SpriteList], Optional[List[TiledObject]]]:
if not scaling:
scaling = self.scaling
sprite_list: Optional[SpriteList] = None
objects_list: Optional[List[TiledObject]] = []
for cur_object in layer.tiled_objects:
# shape: Optional[Union[Point, PointList, Rect]] = None
if isinstance(cur_object, pytiled_parser.tiled_object.Tile):
if not sprite_list:
sprite_list = SpriteList(use_spatial_hash=use_spatial_hash)
tile = self._get_tile_by_gid(cur_object.gid)
my_sprite = self._create_sprite_from_tile(
tile,
scaling=scaling,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
x = cur_object.coordinates.x * scaling
y = (
self.tiled_map.map_size.height * self.tiled_map.tile_size[1]
- cur_object.coordinates.y
) * scaling
my_sprite.width = width = cur_object.size[0] * scaling
my_sprite.height = height = cur_object.size[1] * scaling
center_x = width / 2
center_y = height / 2
if cur_object.rotation:
rotation = -math.radians(cur_object.rotation)
else:
rotation = 0
cos_rotation = math.cos(rotation)
sin_rotation = math.sin(rotation)
rotated_center_x = center_x * cos_rotation - center_y * sin_rotation
                rotated_center_y = center_x * sin_rotation + center_y * cos_rotation
my_sprite.position = (x + rotated_center_x, y + rotated_center_y)
my_sprite.angle = math.degrees(rotation)
if layer.tint_color:
my_sprite.color = layer.tint_color
opacity = layer.opacity
if opacity:
my_sprite.alpha = int(opacity * 255)
if cur_object.properties and "change_x" in cur_object.properties:
my_sprite.change_x = float(cur_object.properties["change_x"])
if cur_object.properties and "change_y" in cur_object.properties:
my_sprite.change_y = float(cur_object.properties["change_y"])
if cur_object.properties and "boundary_bottom" in cur_object.properties:
my_sprite.boundary_bottom = float(
cur_object.properties["boundary_bottom"]
)
if cur_object.properties and "boundary_top" in cur_object.properties:
my_sprite.boundary_top = float(
cur_object.properties["boundary_top"]
)
if cur_object.properties and "boundary_left" in cur_object.properties:
my_sprite.boundary_left = float(
cur_object.properties["boundary_left"]
)
if cur_object.properties and "boundary_right" in cur_object.properties:
my_sprite.boundary_right = float(
cur_object.properties["boundary_right"]
)
if cur_object.properties:
my_sprite.properties.update(cur_object.properties)
if cur_object.type:
my_sprite.properties["type"] = cur_object.type
if cur_object.name:
my_sprite.properties["name"] = cur_object.name
sprite_list.append(my_sprite)
continue
elif isinstance(cur_object, pytiled_parser.tiled_object.Point):
x = cur_object.coordinates.x * scaling
y = (
self.tiled_map.map_size.height * self.tiled_map.tile_size[1]
- cur_object.coordinates.y
) * scaling
shape = [x, y]
elif isinstance(cur_object, pytiled_parser.tiled_object.Rectangle):
sx = cur_object.coordinates.x
sy = -cur_object.coordinates.y
ex = cur_object.coordinates.x + cur_object.size.width
ey = -(cur_object.coordinates.y + cur_object.size.height)
p1 = [sx, sy]
p2 = [ex, sy]
p3 = [ex, ey]
p4 = [sx, ey]
shape = [p1, p2, p3, p4]
elif isinstance(
cur_object, pytiled_parser.tiled_object.Polygon
) or isinstance(cur_object, pytiled_parser.tiled_object.Polyline):
shape = []
for point in cur_object.points:
x = point.x + cur_object.coordinates.x
y = (self.height * self.tile_height) - (
point.y + cur_object.coordinates.y
)
point = (x, y)
shape.append(point)
# If shape is a polyline, and it is closed, we need to remove the duplicate end point
if shape[0][0] == shape[-1][0] and shape[0][1] == shape[-1][1]:
shape.pop()
elif isinstance(cur_object, pytiled_parser.tiled_object.Ellipse):
hw = cur_object.size.width / 2
hh = cur_object.size.height / 2
cx = cur_object.coordinates.x + hw
cy = cur_object.coordinates.y + hh
total_steps = 8
angles = [
step / total_steps * 2 * math.pi for step in range(total_steps)
]
shape = []
for angle in angles:
x = hw * math.cos(angle) + cx
y = -(hh * math.sin(angle) + cy)
point = [x, y]
shape.append(point)
else:
continue
if shape:
tiled_object = TiledObject(
shape, cur_object.properties, cur_object.name, cur_object.type
)
if not objects_list:
objects_list = []
objects_list.append(tiled_object)
return sprite_list or None, objects_list or None
def load_tilemap(
map_file: Union[str, Path],
scaling: float = 1.0,
layer_options: Optional[Dict[str, Dict[str, Any]]] = None,
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm: str = "Simple",
hit_box_detail: float = 4.5,
) -> TileMap:
"""
Given a .json map file, loads in and returns a `TileMap` object.
A TileMap can be created directly using the classes `__init__` function.
This function exists for ease of use.
For more clarification on the layer_options key, see the `__init__` function
of the `TileMap` class
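
    A minimal example (``map.json`` and the ``Walls`` layer name are
    placeholders for your own map)::

        tile_map = load_tilemap(
            "map.json",
            scaling=1.5,
            layer_options={"Walls": {"use_spatial_hash": True}},
        )
        walls = tile_map.sprite_lists["Walls"]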
:param Union[str, Path] map_file: The JSON map file.
:param float scaling: The global scaling to apply to all Sprite's within the map.
:param Optional[bool] use_spatial_hash: If set to True, this will make moving a sprite
in the SpriteList slower, but it will speed up collision detection
with items in the SpriteList. Great for doing collision detection
with static walls/platforms.
:param str hit_box_algorithm: One of 'None', 'Simple' or 'Detailed'.
:param float hit_box_detail: Float, defaults to 4.5. Used with 'Detailed' to hit box.
:param Dict[str, Dict[str, Any]] layer_options: Layer specific options for the map.
"""
return TileMap(
map_file,
scaling,
layer_options,
use_spatial_hash,
hit_box_algorithm,
hit_box_detail,
)
def read_tmx(map_file: Union[str, Path]) -> pytiled_parser.TiledMap:
"""
Deprecated function to raise a warning that it has been removed.
Exists to provide info for outdated code bases.
"""
raise DeprecationWarning(
"The read_tmx function has been replaced by the new TileMap class."
)
| 38.673469 | 120 | 0.556418 |
import copy
import math
import os
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, List, Optional, OrderedDict, Tuple, Union, cast
import pytiled_parser
import pytiled_parser.tiled_object
from arcade import (
AnimatedTimeBasedSprite,
AnimationKeyframe,
Sprite,
SpriteList,
load_texture,
)
from arcade.arcade_types import Point, TiledObject
from arcade.resources import resolve_resource_path
_FLIPPED_HORIZONTALLY_FLAG = 0x80000000
_FLIPPED_VERTICALLY_FLAG = 0x40000000
_FLIPPED_DIAGONALLY_FLAG = 0x20000000
def _get_image_info_from_tileset(tile: pytiled_parser.Tile):
image_x = 0
image_y = 0
if tile.tileset.image is not None:
margin = tile.tileset.margin or 0
spacing = tile.tileset.spacing or 0
row = tile.id // tile.tileset.columns
image_y = margin + row * (tile.tileset.tile_height + spacing)
col = tile.id % tile.tileset.columns
image_x = margin + col * (tile.tileset.tile_width + spacing)
if tile.tileset.image:
width = tile.tileset.tile_width
height = tile.tileset.tile_height
else:
width = tile.image_width
height = tile.image_height
return image_x, image_y, width, height
def _get_image_source(
tile: pytiled_parser.Tile,
map_directory: Optional[str],
) -> Optional[Path]:
image_file = None
if tile.image:
image_file = tile.image
elif tile.tileset.image:
image_file = tile.tileset.image
if not image_file:
        print(
            f"Warning for tile {tile.id}: no image source listed for either "
            f"the individual tile or its tileset."
        )
return None
if os.path.exists(image_file):
return image_file
if map_directory:
try2 = Path(map_directory, image_file)
if os.path.exists(try2):
return try2
print(f"Warning, can't find image {image_file} for tile {tile.id}")
return None
class TileMap:
def __init__(
self,
map_file: Union[str, Path],
scaling: float = 1.0,
layer_options: Optional[Dict[str, Dict[str, Any]]] = None,
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm: str = "Simple",
hit_box_detail: float = 4.5,
) -> None:
# If we should pull from local resources, replace with proper path
map_file = resolve_resource_path(map_file)
# This attribute stores the pytiled-parser map object
self.tiled_map = pytiled_parser.parse_map(map_file)
# Set Map Attributes
self.width = self.tiled_map.map_size.width
self.height = self.tiled_map.map_size.height
self.tile_width = self.tiled_map.tile_size.width
self.tile_height = self.tiled_map.tile_size.height
self.background_color = self.tiled_map.background_color
# Global Layer Defaults
self.scaling = scaling
self.use_spatial_hash = use_spatial_hash
self.hit_box_algorithm = hit_box_algorithm
self.hit_box_detail = hit_box_detail
# Dictionaries to store the SpriteLists for processed layers
self.sprite_lists: OrderedDict[str, SpriteList] = OrderedDict[str, SpriteList]()
self.object_lists: OrderedDict[str, List[TiledObject]] = OrderedDict[
            str, List[TiledObject]
]()
self.properties = self.tiled_map.properties
global_options = {
"scaling": self.scaling,
"use_spatial_hash": self.use_spatial_hash,
"hit_box_algorithm": self.hit_box_algorithm,
"hit_box_detail": self.hit_box_detail,
}
for layer in self.tiled_map.layers:
if (layer.name in self.sprite_lists) or (layer.name in self.object_lists):
raise AttributeError(
f"You have a duplicate layer name '{layer.name}' in your Tiled map. "
"Please use unique names for all layers and tilesets in your map."
)
self._process_layer(layer, global_options, layer_options)
def _process_layer(
self,
layer: pytiled_parser.Layer,
global_options: Dict[str, Any],
layer_options: Optional[Dict[str, Dict[str, Any]]] = None,
) -> None:
processed: Union[
SpriteList, Tuple[Optional[SpriteList], Optional[List[TiledObject]]]
]
options = global_options
if layer_options:
if layer.name in layer_options:
new_options = {
key: layer_options[layer.name].get(key, global_options[key])
for key in global_options
}
options = new_options
if isinstance(layer, pytiled_parser.TileLayer):
processed = self._process_tile_layer(layer, **options)
self.sprite_lists[layer.name] = processed
elif isinstance(layer, pytiled_parser.ObjectLayer):
processed = self._process_object_layer(layer, **options)
if processed[0]:
sprite_list = processed[0]
if sprite_list:
self.sprite_lists[layer.name] = sprite_list
if processed[1]:
object_list = processed[1]
if object_list:
self.object_lists[layer.name] = object_list
elif isinstance(layer, pytiled_parser.ImageLayer):
processed = self._process_image_layer(layer, **options)
self.sprite_lists[layer.name] = processed
elif isinstance(layer, pytiled_parser.LayerGroup):
for sub_layer in layer.layers:
self._process_layer(sub_layer, global_options, layer_options)
def get_cartesian(
self,
x: float,
y: float,
) -> Tuple[float, float]:
x = math.floor(x / (self.tile_width * self.scaling))
y = math.floor(y / (self.tile_height * self.scaling))
return x, y
def get_tilemap_layer(self, layer_path: str) -> Optional[pytiled_parser.Layer]:
assert isinstance(layer_path, str)
def _get_tilemap_layer(my_path, layers):
layer_name = my_path.pop(0)
for my_layer in layers:
if my_layer.name == layer_name:
if isinstance(my_layer, pytiled_parser.LayerGroup):
if len(my_path) != 0:
return _get_tilemap_layer(my_path, my_layer.layers)
else:
return my_layer
return None
path = layer_path.strip("/").split("/")
layer = _get_tilemap_layer(path, self.tiled_map.layers)
return layer
def _get_tile_by_gid(self, tile_gid: int) -> Optional[pytiled_parser.Tile]:
flipped_diagonally = False
flipped_horizontally = False
flipped_vertically = False
if tile_gid & _FLIPPED_HORIZONTALLY_FLAG:
flipped_horizontally = True
tile_gid -= _FLIPPED_HORIZONTALLY_FLAG
if tile_gid & _FLIPPED_DIAGONALLY_FLAG:
flipped_diagonally = True
tile_gid -= _FLIPPED_DIAGONALLY_FLAG
if tile_gid & _FLIPPED_VERTICALLY_FLAG:
flipped_vertically = True
tile_gid -= _FLIPPED_VERTICALLY_FLAG
for tileset_key, tileset in self.tiled_map.tilesets.items():
if tile_gid < tileset_key:
continue
# No specific tile info, but there is a tile sheet
# print(f"data {tileset_key} {tileset.tiles} {tileset.image} {tileset_key} {tile_gid} {tileset.tile_count}")
if (
tileset.image is not None
and tileset_key <= tile_gid < tileset_key + tileset.tile_count
):
# No specific tile info, but there is a tile sheet
tile_ref = pytiled_parser.Tile(
id=(tile_gid - tileset_key), image=tileset.image
)
elif tileset.tiles is None and tileset.image is not None:
# Not in this tileset, move to the next
continue
else:
if tileset.tiles is None:
return None
tile_ref = tileset.tiles.get(tile_gid - tileset_key)
if tile_ref:
my_tile = copy.copy(tile_ref)
my_tile.tileset = tileset
my_tile.flipped_vertically = flipped_vertically
my_tile.flipped_diagonally = flipped_diagonally
my_tile.flipped_horizontally = flipped_horizontally
return my_tile
print(f"Returning NO tile for {tile_gid}.")
return None
def _get_tile_by_id(
self, tileset: pytiled_parser.Tileset, tile_id: int
) -> Optional[pytiled_parser.Tile]:
for tileset_key, cur_tileset in self.tiled_map.tilesets.items():
if cur_tileset is tileset:
for tile_key, tile in cur_tileset.tiles.items():
if tile_id == tile.id:
return tile
return None
def _create_sprite_from_tile(
self,
tile: pytiled_parser.Tile,
scaling: float = 1.0,
hit_box_algorithm: str = "Simple",
hit_box_detail: float = 4.5,
) -> Sprite:
# --- Step 1, Find a reference to an image this is going to be based off of
map_source = self.tiled_map.map_file
map_directory = os.path.dirname(map_source)
image_file = _get_image_source(tile, map_directory)
if tile.animation:
my_sprite: Sprite = AnimatedTimeBasedSprite(image_file, scaling)
else:
image_x, image_y, width, height = _get_image_info_from_tileset(tile)
my_sprite = Sprite(
image_file,
scaling,
image_x,
image_y,
width,
height,
flipped_horizontally=tile.flipped_horizontally,
flipped_vertically=tile.flipped_vertically,
flipped_diagonally=tile.flipped_diagonally,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
if tile.properties is not None and len(tile.properties) > 0:
for key, value in tile.properties.items():
my_sprite.properties[key] = value
if tile.type:
my_sprite.properties["type"] = tile.type
if tile.objects is not None:
if not isinstance(tile.objects, pytiled_parser.ObjectLayer):
print("Warning, tile.objects is not an ObjectLayer as expected.")
return my_sprite
if len(tile.objects.tiled_objects) > 1:
if tile.image:
print(
f"Warning, only one hit box supported for tile with image {tile.image}."
)
else:
                    print("Warning, only one hit box supported for tile.")
for hitbox in tile.objects.tiled_objects:
points: List[Point] = []
if isinstance(hitbox, pytiled_parser.tiled_object.Rectangle):
if hitbox.size is None:
                        print(
                            f"Warning: Rectangle hitbox created without a "
                            f"height or width for {tile.image}. Ignoring."
                        )
continue
sx = hitbox.coordinates.x - (my_sprite.width / (scaling * 2))
sy = -(hitbox.coordinates.y - (my_sprite.height / (scaling * 2)))
ex = (hitbox.coordinates.x + hitbox.size.width) - (
my_sprite.width / (scaling * 2)
)
ey = -(hitbox.coordinates.y + hitbox.size.height) - (
my_sprite.height / (scaling * 2)
)
points = [[sx, sy], [ex, sy], [ex, ey], [sx, ey]]
elif isinstance(
hitbox, pytiled_parser.tiled_object.Polygon
) or isinstance(hitbox, pytiled_parser.tiled_object.Polyline):
for point in hitbox.points:
adj_x = (
point.x
+ hitbox.coordinates.x
- my_sprite.width / (scaling * 2)
)
adj_y = -(
point.y
+ hitbox.coordinates.y
- my_sprite.height / (scaling * 2)
)
adj_point = [adj_x, adj_y]
points.append(adj_point)
if points[0][0] == points[-1][0] and points[0][1] == points[-1][1]:
points.pop()
elif isinstance(hitbox, pytiled_parser.tiled_object.Ellipse):
if not hitbox.size:
print(
f"Warning: Ellipse hitbox created without a height "
f" or width for {tile.image}. Ignoring."
)
continue
hw = hitbox.size.width / 2
hh = hitbox.size.height / 2
cx = hitbox.coordinates.x + hw
cy = hitbox.coordinates.y + hh
acx = cx - (my_sprite.width / (scaling * 2))
acy = cy - (my_sprite.height / (scaling * 2))
total_steps = 8
angles = [
step / total_steps * 2 * math.pi for step in range(total_steps)
]
for angle in angles:
x = hw * math.cos(angle) + acx
y = -(hh * math.sin(angle) + acy)
points.append([x, y])
else:
print(f"Warning: Hitbox type {type(hitbox)} not supported.")
my_sprite.hit_box = points
if tile.animation:
key_frame_list = []
for frame in tile.animation:
frame_tile = self._get_tile_by_id(tile.tileset, frame.tile_id)
if frame_tile:
image_file = _get_image_source(frame_tile, map_directory)
if frame_tile.image and image_file:
texture = load_texture(image_file)
elif not frame_tile.image and image_file:
# No image for tile, pull from tilesheet
(
image_x,
image_y,
width,
height,
) = _get_image_info_from_tileset(frame_tile)
texture = load_texture(
image_file, image_x, image_y, width, height
)
else:
print(
f"Warning: failed to load image for animation frame for tile {frame_tile.id}"
)
texture = None
key_frame = AnimationKeyframe(
frame.tile_id, frame.duration, texture
)
key_frame_list.append(key_frame)
if len(key_frame_list) == 1:
my_sprite.texture = key_frame.texture
cast(AnimatedTimeBasedSprite, my_sprite).frames = key_frame_list
return my_sprite
def _process_image_layer(
self,
layer: pytiled_parser.ImageLayer,
scaling: float = 1.0,
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm: str = "Simple",
hit_box_detail: float = 4.5,
) -> SpriteList:
sprite_list: SpriteList = SpriteList(use_spatial_hash=use_spatial_hash)
map_source = self.tiled_map.map_file
map_directory = os.path.dirname(map_source)
image_file = layer.image
if not os.path.exists(image_file) and (map_directory):
try2 = Path(map_directory, image_file)
if not os.path.exists(try2):
print(
f"Warning, can't find image {image_file} for Image Layer {layer.name}"
)
image_file = try2
my_texture = load_texture(
image_file,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
if layer.transparent_color:
data = my_texture.image.getdata()
target = layer.transparent_color
new_data = []
for item in data:
if (
item[0] == target[0]
and item[1] == target[1]
and item[2] == target[2]
):
new_data.append((255, 255, 255, 0))
else:
new_data.append(item)
my_texture.image.putdata(new_data)
my_sprite = Sprite(
image_file,
scaling,
texture=my_texture,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
if layer.properties:
for key, value in layer.properties.items():
my_sprite.properties[key] = value
if layer.tint_color:
my_sprite.color = layer.tint_color
if layer.opacity:
my_sprite.alpha = int(layer.opacity * 255)
my_sprite.center_x = (layer.offset[0] * scaling) + my_sprite.width / 2
my_sprite.center_y = layer.offset[1]
sprite_list.append(my_sprite)
return sprite_list
def _process_tile_layer(
self,
layer: pytiled_parser.TileLayer,
scaling: float = 1.0,
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm: str = "Simple",
hit_box_detail: float = 4.5,
) -> SpriteList:
sprite_list: SpriteList = SpriteList(use_spatial_hash=use_spatial_hash)
map_array = layer.data
for row_index, row in enumerate(map_array):
for column_index, item in enumerate(row):
if item == 0:
continue
tile = self._get_tile_by_gid(item)
if tile is None:
raise ValueError(
(
f"Couldn't find tile for item {item} in layer "
f"'{layer.name}' in file '{self.tiled_map.map_file}'"
f"at ({column_index}, {row_index})."
)
)
my_sprite = self._create_sprite_from_tile(
tile,
scaling=scaling,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
if my_sprite is None:
print(
f"Warning: Could not create sprite number {item} in layer '{layer.name}' {tile.image}"
)
else:
my_sprite.center_x = (
column_index * (self.tiled_map.tile_size[0] * scaling)
+ my_sprite.width / 2
)
my_sprite.center_y = (
self.tiled_map.map_size.height - row_index - 1
) * (self.tiled_map.tile_size[1] * scaling) + my_sprite.height / 2
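                # Tiled numbers rows downward from the top of the map, so the
                # row index is flipped here to get arcade's bottom-up y.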
# Tint
if layer.tint_color:
my_sprite.color = layer.tint_color
# Opacity
opacity = layer.opacity
if opacity:
my_sprite.alpha = int(opacity * 255)
sprite_list.append(my_sprite)
return sprite_list
def _process_object_layer(
self,
layer: pytiled_parser.ObjectLayer,
scaling: float = 1.0,
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm: str = "Simple",
hit_box_detail: float = 4.5,
) -> Tuple[Optional[SpriteList], Optional[List[TiledObject]]]:
if not scaling:
scaling = self.scaling
sprite_list: Optional[SpriteList] = None
objects_list: Optional[List[TiledObject]] = []
for cur_object in layer.tiled_objects:
# shape: Optional[Union[Point, PointList, Rect]] = None
if isinstance(cur_object, pytiled_parser.tiled_object.Tile):
if not sprite_list:
sprite_list = SpriteList(use_spatial_hash=use_spatial_hash)
tile = self._get_tile_by_gid(cur_object.gid)
my_sprite = self._create_sprite_from_tile(
tile,
scaling=scaling,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
x = cur_object.coordinates.x * scaling
y = (
self.tiled_map.map_size.height * self.tiled_map.tile_size[1]
- cur_object.coordinates.y
) * scaling
my_sprite.width = width = cur_object.size[0] * scaling
my_sprite.height = height = cur_object.size[1] * scaling
center_x = width / 2
center_y = height / 2
if cur_object.rotation:
rotation = -math.radians(cur_object.rotation)
else:
rotation = 0
cos_rotation = math.cos(rotation)
sin_rotation = math.sin(rotation)
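                # Rotate the center offset with the standard 2D rotation:
                #   x' = x*cos(a) - y*sin(a),   y' = x*sin(a) + y*cos(a)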
rotated_center_x = center_x * cos_rotation - center_y * sin_rotation
                rotated_center_y = center_x * sin_rotation + center_y * cos_rotation
my_sprite.position = (x + rotated_center_x, y + rotated_center_y)
my_sprite.angle = math.degrees(rotation)
if layer.tint_color:
my_sprite.color = layer.tint_color
opacity = layer.opacity
if opacity:
my_sprite.alpha = int(opacity * 255)
if cur_object.properties and "change_x" in cur_object.properties:
my_sprite.change_x = float(cur_object.properties["change_x"])
if cur_object.properties and "change_y" in cur_object.properties:
my_sprite.change_y = float(cur_object.properties["change_y"])
if cur_object.properties and "boundary_bottom" in cur_object.properties:
my_sprite.boundary_bottom = float(
cur_object.properties["boundary_bottom"]
)
if cur_object.properties and "boundary_top" in cur_object.properties:
my_sprite.boundary_top = float(
cur_object.properties["boundary_top"]
)
if cur_object.properties and "boundary_left" in cur_object.properties:
my_sprite.boundary_left = float(
cur_object.properties["boundary_left"]
)
if cur_object.properties and "boundary_right" in cur_object.properties:
my_sprite.boundary_right = float(
cur_object.properties["boundary_right"]
)
if cur_object.properties:
my_sprite.properties.update(cur_object.properties)
if cur_object.type:
my_sprite.properties["type"] = cur_object.type
if cur_object.name:
my_sprite.properties["name"] = cur_object.name
sprite_list.append(my_sprite)
continue
elif isinstance(cur_object, pytiled_parser.tiled_object.Point):
x = cur_object.coordinates.x * scaling
y = (
self.tiled_map.map_size.height * self.tiled_map.tile_size[1]
- cur_object.coordinates.y
) * scaling
shape = [x, y]
elif isinstance(cur_object, pytiled_parser.tiled_object.Rectangle):
sx = cur_object.coordinates.x
sy = -cur_object.coordinates.y
ex = cur_object.coordinates.x + cur_object.size.width
ey = -(cur_object.coordinates.y + cur_object.size.height)
p1 = [sx, sy]
p2 = [ex, sy]
p3 = [ex, ey]
p4 = [sx, ey]
shape = [p1, p2, p3, p4]
elif isinstance(
cur_object, pytiled_parser.tiled_object.Polygon
) or isinstance(cur_object, pytiled_parser.tiled_object.Polyline):
shape = []
for point in cur_object.points:
x = point.x + cur_object.coordinates.x
                    y = (self.tiled_map.map_size.height * self.tiled_map.tile_size[1]) - (
                        point.y + cur_object.coordinates.y
                    )
point = (x, y)
shape.append(point)
# If shape is a polyline, and it is closed, we need to remove the duplicate end point
if shape[0][0] == shape[-1][0] and shape[0][1] == shape[-1][1]:
shape.pop()
elif isinstance(cur_object, pytiled_parser.tiled_object.Ellipse):
hw = cur_object.size.width / 2
hh = cur_object.size.height / 2
cx = cur_object.coordinates.x + hw
cy = cur_object.coordinates.y + hh
total_steps = 8
angles = [
step / total_steps * 2 * math.pi for step in range(total_steps)
]
shape = []
for angle in angles:
x = hw * math.cos(angle) + cx
y = -(hh * math.sin(angle) + cy)
point = [x, y]
shape.append(point)
else:
continue
if shape:
tiled_object = TiledObject(
shape, cur_object.properties, cur_object.name, cur_object.type
)
if not objects_list:
objects_list = []
objects_list.append(tiled_object)
return sprite_list or None, objects_list or None
def load_tilemap(
map_file: Union[str, Path],
scaling: float = 1.0,
layer_options: Optional[Dict[str, Dict[str, Any]]] = None,
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm: str = "Simple",
hit_box_detail: float = 4.5,
) -> TileMap:
return TileMap(
map_file,
scaling,
layer_options,
use_spatial_hash,
hit_box_algorithm,
hit_box_detail,
)
def read_tmx(map_file: Union[str, Path]) -> pytiled_parser.TiledMap:
raise DeprecationWarning(
"The read_tmx function has been replaced by the new TileMap class."
)

# ==== file: Achive/rpyc_MyService.py | repo: rscd27p/DockerTest | license: MIT ====
import rpyc
import nidmm
class MyService(rpyc.Service):
    exposed_nidmm = nidmm


if __name__ == "__main__":
    from rpyc.utils.server import ThreadedServer
    t = ThreadedServer(MyService, port=18861, protocol_config={"allow_public_attrs": True, "allow_all_attrs": True})
    t.start()
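
# Illustrative client-side sketch (not part of the original file; host and
# port are assumptions matching the server defaults above). The exposed
# attribute is reachable as conn.root.nidmm:
#
#     import rpyc
#     conn = rpyc.connect("localhost", 18861)
#     remote_nidmm = conn.root.nidmm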

# ==== file: test/test.py | repo: ahcorde/srdfdom | license: BSD-3-Clause ====
#!/usr/bin/env python
PKG = 'srdfdom'
import sys
import rospkg
import unittest
from srdfdom.srdf import SRDF
from xml.dom.minidom import parseString
import xml.dom
# xml match code from test_xacro.py
# by Stuart Glaser and William Woodall
def first_child_element(elt):
c = elt.firstChild
while c:
if c.nodeType == xml.dom.Node.ELEMENT_NODE:
return c
c = c.nextSibling
return None
def next_sibling_element(elt):
c = elt.nextSibling
while c:
if c.nodeType == xml.dom.Node.ELEMENT_NODE:
return c
c = c.nextSibling
return None
def all_attributes_match(a, b):
if len(a.attributes) != len(b.attributes):
print("Different number of attributes")
return False
a_atts = [(a.attributes.item(i).name, a.attributes.item(i).value) for i in range(len(a.attributes))]
b_atts = [(b.attributes.item(i).name, b.attributes.item(i).value) for i in range(len(b.attributes))]
a_atts.sort()
b_atts.sort()
for i in range(len(a_atts)):
if a_atts[i][0] != b_atts[i][0]:
print("Different attribute names: %s and %s" % (a_atts[i][0], b_atts[i][0]))
return False
try:
if abs(float(a_atts[i][1]) - float(b_atts[i][1])) > 1.0e-9:
print("Different attribute values: %s and %s" % (a_atts[i][1], b_atts[i][1]))
return False
except ValueError: # Attribute values aren't numeric
if a_atts[i][1] != b_atts[i][1]:
print("Different attribute values: %s and %s" % (a_atts[i][1], b_atts[i][1]))
return False
return True
def elements_match(a, b):
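  # Recursively compares two DOM elements: node type, tag name and attributes
  # must match, then the first children and the next siblings are compared,
  # which walks the whole subtree and sibling chain.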
if not a and not b:
return True
if not a or not b:
return False
if a.nodeType != b.nodeType:
print("Different node types: %d and %d" % (a.nodeType, b.nodeType))
return False
if a.nodeName != b.nodeName:
print("Different element names: %s and %s" % (a.nodeName, b.nodeName))
return False
if not all_attributes_match(a, b):
return False
if not elements_match(first_child_element(a), first_child_element(b)):
return False
if not elements_match(next_sibling_element(a), next_sibling_element(b)):
return False
return True
def xml_matches(a, b):
  if isinstance(a, str):
    return xml_matches(parseString(a).documentElement, b)
  if isinstance(b, str):
    return xml_matches(a, parseString(b).documentElement)
if a.nodeType == xml.dom.Node.DOCUMENT_NODE:
return xml_matches(a.documentElement, b)
if b.nodeType == xml.dom.Node.DOCUMENT_NODE:
return xml_matches(a, b.documentElement)
if not elements_match(a, b):
print("Match failed:")
a.writexml(sys.stdout)
    print()
print('=' * 78)
b.writexml(sys.stdout)
return False
return True
## A python unit test for srdf
class TestSRDFParser(unittest.TestCase):
## test valid srdf
def test_full_srdf(self):
srdf_data = '''
<robot name="myrobot">
<group name="body">
<joint name="J1" />
<joint name="J2" />
<joint name="J3" />
<chain base_link="robot_base" tip_link="robot_tip" />
<group name="arm" />
</group>
<group_state name="zero" group="body">
<joint name="J1" value="0" />
<joint name="J2" value="0" />
<joint name="J3" value="0" />
</group_state>
<end_effector name="tip_ee" parent_link="tip" group="arm" parent_group="body" />
<end_effector name="othertip_ee" parent_link="othertip" group="arm" />
<virtual_joint name="virtual_joint" type="floating" parent_frame="body_frame" child_link="arm" />
<disable_collisions link1="link1" link2="link3" />
<disable_collisions reason="Adjacent" link1="link1" link2="link2" />
<link_sphere_approximation link="link1" />
<link_sphere_approximation link="link2" >
<sphere center="1.0 2.0 3.0" radius="1.0" />
<sphere center="1.0 2.0 4.0" radius="2.0" />
</link_sphere_approximation>
</robot>
'''
expected = '''
<robot name="myrobot">
<group name="body">
<joint name="J1" />
<joint name="J2" />
<joint name="J3" />
<chain base_link="robot_base" tip_link="robot_tip"/>
<group name="arm" />
</group>
<group_state name="zero" group="body">
<joint name="J1" value="0" />
<joint name="J2" value="0" />
<joint name="J3" value="0" />
</group_state>
<end_effector group="arm" name="tip_ee" parent_group="body" parent_link="tip"/>
<end_effector name="othertip_ee" parent_link="othertip" group="arm" />
<virtual_joint child_link="arm" name="virtual_joint" parent_frame="body_frame" type="floating" />
<disable_collisions link1="link1" link2="link3" />
<disable_collisions link1="link1" link2="link2" reason="Adjacent" />
<link_sphere_approximation link="link1" />
<link_sphere_approximation link="link2" >
<sphere center="1.0 2.0 3.0" radius="1.0" />
<sphere center="1.0 2.0 4.0" radius="2.0" />
</link_sphere_approximation>
</robot>
'''
robot = SRDF.from_xml_string(srdf_data)
self.assertTrue(xml_matches(robot.to_xml_string(),expected))
def test_simple_srdf(self):
datadir=rospkg.RosPack().get_path('srdfdom')+"/test/resources/"
stream = open(datadir+'pr2_desc.1.srdf', 'r')
robot = SRDF.from_xml_string(stream.read())
stream.close()
self.assertTrue(len(robot.virtual_joints)==0)
self.assertTrue(len(robot.groups)==0)
self.assertTrue(len(robot.group_states)==0)
self.assertTrue(len(robot.disable_collisionss)==0)
self.assertTrue(len(robot.end_effectors)==0)
stream = open(datadir+'pr2_desc.2.srdf', 'r')
robot = SRDF.from_xml_string(stream.read())
stream.close()
self.assertTrue(len(robot.virtual_joints)==1)
self.assertTrue(len(robot.groups)==1)
self.assertTrue(len(robot.group_states)==0)
self.assertTrue(len(robot.disable_collisionss)==0)
self.assertTrue(len(robot.end_effectors)==0)
def test_complex_srdf(self):
datadir=rospkg.RosPack().get_path('srdfdom')+"/test/resources/"
stream = open(datadir+'pr2_desc.3.srdf', 'r')
robot = SRDF.from_xml_string(stream.read())
stream.close()
self.assertTrue(len(robot.virtual_joints)==1)
self.assertTrue(len(robot.groups)==7)
self.assertTrue(len(robot.group_states)==2)
self.assertTrue(len(robot.disable_collisionss)==2)
self.assertTrue(robot.disable_collisionss[0].reason=="adjacent")
self.assertTrue(len(robot.end_effectors)==2)
self.assertTrue(robot.virtual_joints[0].name=="world_joint")
self.assertTrue(robot.virtual_joints[0].type=="planar")
for group in robot.groups:
if (group.name == "left_arm" or group.name == "right_arm" ):
self.assertTrue(len(group.chains)==1)
if group.name == "arms":
self.assertTrue(len(group.subgroups)==2)
if group.name == "base":
self.assertTrue(len(group.joints)==1)
if (group.name == "l_end_effector" or group.name == "r_end_effector" ):
self.assertTrue(len(group.links)==1)
self.assertTrue(len(group.joints)==9)
if group.name == "whole_body" :
self.assertTrue(len(group.joints)==1)
self.assertTrue(len(group.subgroups)==2)
index=0
if robot.group_states[0].group !="arms":
index=1
self.assertTrue(robot.group_states[index].group =="arms")
self.assertTrue(robot.group_states[index].name =="tuck_arms")
self.assertTrue(robot.group_states[1-index].group =="base")
self.assertTrue(robot.group_states[1-index].name =="home")
v=next((joint.value for joint in robot.group_states[index].joints if joint.name=="l_shoulder_pan_joint"),None)
self.assertTrue(len(v) == 1)
self.assertTrue(v[0] ==0.2)
w=next((joint.value for joint in robot.group_states[1-index].joints if joint.name=="world_joint"),None)
self.assertTrue(len(w) == 3)
self.assertTrue(w[0] ==0.4)
self.assertTrue(w[1] ==0)
self.assertTrue(w[2] ==-1)
index = 0 if (robot.end_effectors[0].name[0] == 'r') else 1
self.assertTrue(robot.end_effectors[index].name == 'r_end_effector')
self.assertTrue(robot.end_effectors[index].group == 'r_end_effector')
self.assertTrue(robot.end_effectors[index].parent_link == 'r_wrist_roll_link')
if __name__ == '__main__':
import rostest
rostest.rosrun(PKG, 'srdf_python_parser_test', TestSRDFParser)

# ==== file: zvt/domain/fundamental/valuation.py | repo: aaron8tang/zvt | license: MIT ====
# -*- coding: utf-8 -*-
from sqlalchemy import Column, String, Float
from sqlalchemy.orm import declarative_base
from zvt.contract import Mixin
from zvt.contract.register import register_schema
ValuationBase = declarative_base()
class StockValuation(ValuationBase, Mixin):
"""
    Stock valuation data, e.g. PE, price-to-book ratio, etc.
    """
    __tablename__ = 'stock_valuation'
    code = Column(String(length=32))
    name = Column(String(length=32))
    # total share capital (shares)
    capitalization = Column(Float)
    # total ordinary shares issued (total capital including A, B and H shares)
    circulating_cap = Column(Float)
    # market capitalization
    market_cap = Column(Float)
    # circulating (free-float) market capitalization
    circulating_market_cap = Column(Float)
    # turnover ratio
    turnover_ratio = Column(Float)
    # static PE
    pe = Column(Float)
    # trailing PE (TTM)
    pe_ttm = Column(Float)
    # price-to-book ratio
    pb = Column(Float)
    # price-to-sales ratio
    ps = Column(Float)
    # price-to-cash-flow ratio
pcf = Column(Float)
class EtfValuation(ValuationBase, Mixin):
__tablename__ = 'etf_valuation'
code = Column(String(length=32))
name = Column(String(length=32))
    # static PE
    pe = Column(Float)
    # weighted
    pe1 = Column(Float)
    # trailing PE (TTM)
    pe_ttm = Column(Float)
    # weighted
    pe_ttm1 = Column(Float)
    # price-to-book ratio
    pb = Column(Float)
    # weighted
    pb1 = Column(Float)
    # price-to-sales ratio
    ps = Column(Float)
    # weighted
    ps1 = Column(Float)
    # price-to-cash-flow ratio
    pcf = Column(Float)
    # weighted
    pcf1 = Column(Float)
register_schema(providers=['joinquant'], db_name='valuation', schema_base=ValuationBase, entity_type='stock')
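
# Illustrative query sketch (assumptions: data has been recorded through the
# joinquant provider, and `query_data` is inherited from zvt's Mixin):
#
#     df = StockValuation.query_data(provider='joinquant', codes=['000338'],
#                                    columns=[StockValuation.code, StockValuation.pe])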
# the __all__ is generated
__all__ = ['StockValuation', 'EtfValuation']

# ==== file: frappe/public/hello.py | repo: process-success/frappeModifyed | license: MIT ====
#!/usr/bin/python2.7
print("Hello world")

# ==== file: main.py | repo: alwye/snake-game | license: MIT ====
"""
Famous snake game
"""
from settings import *
from game import SnakeGame
if __name__ == "__main__":
game = SnakeGame()
    game.run()

# ==== file: tests/test_replace_intensity.py | repo: elsandal/pyclesperanto_prototype | license: BSD-3-Clause ====
import pyclesperanto_prototype as cle
import numpy as np
def test_replace_intensity():
test1 = cle.push(np.asarray([
[0, 0, 0, 0, 0],
[0, 1, 2, 3, 0],
[0, 2, 3, 4, 0],
[0, 4, 4, 5, 0],
[0, 0, 0, 0, 0]
]))
reference = cle.push(np.asarray([
[0, 0, 0, 0, 0],
[0, 1, 8, 3, 0],
[0, 8, 3, 4, 0],
[0, 4, 4, 5, 0],
[0, 0, 0, 0, 0]
]))
result = cle.create(test1)
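    # replace_intensity(source, destination, value_to_replace, new_value):
    # every pixel equal to 2 in test1 becomes 8, matching `reference` above.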
cle.replace_intensity(test1, result, 2, 8)
print(result)
a = cle.pull(result)
b = cle.pull(reference)
assert (np.allclose(a, b, 0.001))

# ==== file: api/batch_processing/postprocessing/combine_api_outputs.py | repo: alsnothome/CameraTraps | license: MIT ====
"""
Merges two or more .json files in batch API output format, optionally
writing the results to another .json file.
- Concatenates image lists, erroring if images are not unique.
- Errors if class lists are conflicting; errors on unrecognized fields.
- Checks compatibility in info structs, within reason.
File format:
https://github.com/microsoft/CameraTraps/tree/master/api/batch_processing#batch-processing-api-output-format
Command-line use:
combine_api_outputs input1.json input2.json ... inputN.json output.json
Also see combine_api_shard_files() (not exposed via the command line yet) to
combine the intermediate files created by the API.
"""
#%% Constants and imports
import argparse
import json
from typing import Any, Dict, Iterable, Mapping, List, Optional
#%% Merge functions
def combine_api_output_files(input_files: List[str],
output_file: Optional[str] = None,
require_uniqueness: bool = True
) -> Dict[str, Any]:
"""Merges list of JSON API detection files *input_files* into a single
dictionary, optionally writing the result to *output_file*.
Args:
input_files: list of str, paths to JSON detection files
output_file: optional str, path to write merged JSON
        require_uniqueness: bool, whether to error when the same image appears
            in more than one input file (if False, duplicates are tolerated
            and counted, and the last occurrence wins)
"""
input_dicts = []
print('Loading input files')
for fn in input_files:
with open(fn, 'r') as f:
input_dicts.append(json.load(f))
print('Merging results')
merged_dict = combine_api_output_dictionaries(
input_dicts, require_uniqueness=require_uniqueness)
print('Writing output')
if output_file is not None:
with open(output_file, 'w') as f:
json.dump(merged_dict, f, indent=1)
return merged_dict
def combine_api_output_dictionaries(input_dicts: Iterable[Mapping[str, Any]],
require_uniqueness: bool = True
) -> Dict[str, Any]:
"""Merges the list of API detection dictionaries *input_dicts*. See header
comment for details on merge rules.
Args:
input_dicts: list of dicts, each dict is the JSON of the detections
output file from the Batch Processing API
require_uniqueness: bool, whether to require that the images in
each input_dict be unique
Returns: dict, represents the merged JSON
"""
# Map image filenames to detections, we'll convert to a list later
images = {}
info: Dict[str, str] = {}
detection_categories: Dict[str, str] = {}
classification_categories: Dict[str, str] = {}
n_redundant_images = 0
n_images = 0
known_fields = ['info', 'detection_categories', 'classification_categories',
'images']
for input_dict in input_dicts:
for k in input_dict:
if k not in known_fields:
raise ValueError(f'Unrecognized API output field: {k}')
# Check compatibility of detection categories
for cat_id in input_dict['detection_categories']:
cat_name = input_dict['detection_categories'][cat_id]
if cat_id in detection_categories:
assert detection_categories[cat_id] == cat_name, (
'Detection category mismatch')
else:
detection_categories[cat_id] = cat_name
# Check compatibility of classification categories
if 'classification_categories' in input_dict:
for cat_id in input_dict['classification_categories']:
cat_name = input_dict['classification_categories'][cat_id]
if cat_id in classification_categories:
assert classification_categories[cat_id] == cat_name, (
'Classification category mismatch')
else:
classification_categories[cat_id] = cat_name
# Merge image lists, checking uniqueness
for im in input_dict['images']:
im_file = im['file']
if require_uniqueness:
assert im_file not in images, f'Duplicate image: {im_file}'
elif im_file in images:
n_redundant_images += 1
# print(f'Warning, duplicate results for image: {im_file}')
images[im_file] = im
n_images += 1
# Merge info dicts, don't check completion time fields
if len(info) == 0:
info = input_dict['info']
else:
info_compare = input_dict['info']
assert info_compare['detector'] == info['detector'], (
'Incompatible detection versions in merging')
assert info_compare['format_version'] == info['format_version'], (
'Incompatible API output versions in merging')
if 'classifier' in info_compare:
if 'classifier' in info:
assert info['classifier'] == info_compare['classifier']
else:
info['classifier'] = info_compare['classifier']
# ...for each dictionary
if n_redundant_images > 0:
print(f'Warning: found {n_redundant_images} redundant images '
f'(out of {n_images} total) during merge')
# Convert merged image dictionaries to a sorted list
sorted_images = sorted(images.values(), key=lambda im: im['file'])
merged_dict = {'info': info,
'detection_categories': detection_categories,
'classification_categories': classification_categories,
'images': sorted_images}
return merged_dict
def combine_api_shard_files(input_files, output_file=None):
"""
Merges the list of .json-formatted API shard files *input_files* into a single
list of dictionaries, optionally writing the result to *output_file*.
"""
input_lists = []
print('Loading input files')
for fn in input_files:
input_lists.append(json.load(open(fn)))
detections = []
# detection_list = input_lists[0]
for detection_list in input_lists:
assert isinstance(detection_list, list)
# d = detection_list[0]
for d in detection_list:
assert 'file' in d
assert 'max_detection_conf' in d
assert 'detections' in d
detections.extend([d])
print('Writing output')
if output_file is not None:
with open(output_file, 'w') as f:
json.dump(detections, f, indent=1)
return detections
#%% Driver
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'input_paths', nargs='+',
help='List of input .json files')
parser.add_argument(
'output_path',
help='Output .json file')
args = parser.parse_args()
combine_api_output_files(args.input_paths, args.output_path)
if __name__ == '__main__':
main()
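
# Illustrative invocation sketch (file names are placeholders, not from the
# original module):
#
#     combined = combine_api_output_files(
#         ["detections_a.json", "detections_b.json"], "detections_merged.json")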

# ==== file: tests/core/contracts/test_contract_method_to_argument_matching.py | repo: y19818/web3.py | license: MIT ====
import json
import pytest
from web3._utils.abi import (
get_abi_input_types,
)
from web3._utils.function_identifiers import (
FallbackFn,
)
from web3.exceptions import (
ValidationError,
)
SINGLE_FN_NO_ARGS = json.loads('[{"constant":false,"inputs":[],"name":"a","outputs":[],"type":"function"}]') # noqa: E501
SINGLE_FN_ONE_ARG = json.loads('[{"constant":false,"inputs":[{"name":"","type":"uint256"}],"name":"a","outputs":[],"type":"function"}]') # noqa: E501
FALLBACK_FUNCTION = json.loads('[{"constant": false, "inputs": [], "name": "getData", "outputs": [{"name": "r", "type": "uint256"}], "payable": false, "stateMutability": "nonpayable", "type": "function"}, {"payable": false, "stateMutability": "nonpayable", "type": "fallback"}]') # noqa: E501
MULTIPLE_FUNCTIONS = json.loads('''
[
{
"constant": false,
"inputs": [],
"name": "a",
"outputs": [],
"type": "function"
},
{
"constant": false,
"inputs": [
{
"name": "",
"type": "bytes32"
}
],
"name": "a",
"outputs": [],
"type": "function"
},
{
"constant": false,
"inputs": [
{
"name": "",
"type": "uint256"
}
],
"name": "a",
"outputs": [],
"type": "function"
},
{
"constant": false,
"inputs": [
{
"name": "",
"type": "uint8"
}
],
"name": "a",
"outputs": [],
"type": "function"
},
{
"constant": false,
"inputs": [
{
"name": "",
"type": "int8"
}
],
"name": "a",
"outputs": [],
"type": "function"
},
{
"constant": false,
"inputs": [
{
"name": "",
"type": "tuple[]",
"components": [
{"name": "", "type": "int256"},
{"name": "", "type": "bool"}
]
}
],
"name": "a",
"outputs": [],
"type": "function"
}
]
''')
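
# Six overloads of `a` that differ only in their input types; the tests below
# check that _find_matching_fn_abi resolves the ABI whose types fit the
# arguments that are passed in.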
def test_finds_single_function_without_args(web3):
Contract = web3.vns.contract(abi=SINGLE_FN_NO_ARGS)
abi = Contract._find_matching_fn_abi('a', [])
assert abi['name'] == 'a'
assert abi['inputs'] == []
def test_finds_single_function_with_args(web3):
Contract = web3.vns.contract(abi=SINGLE_FN_ONE_ARG)
abi = Contract._find_matching_fn_abi('a', [1234])
assert abi['name'] == 'a'
assert len(abi['inputs']) == 1
assert abi['inputs'][0]['type'] == 'uint256'
def test_finds_fallback_function(web3):
Contract = web3.vns.contract(abi=FALLBACK_FUNCTION)
abi = Contract._find_matching_fn_abi(FallbackFn, [])
assert abi['type'] == 'fallback'
def test_error_when_no_function_name_match(web3):
Contract = web3.vns.contract(abi=SINGLE_FN_NO_ARGS)
with pytest.raises(ValidationError):
Contract._find_matching_fn_abi('no_function_name', [1234])
@pytest.mark.parametrize(
'arguments,expected_types',
(
([], []),
([b'arst'], ['bytes32']),
(['0xf00b47'], ['bytes32']),
([1234567890], ['uint256']),
# ([255], ['uint8']), # TODO: enable
([-1], ['int8']),
([[(-1, True), (2, False)]], ['(int256,bool)[]']),
)
)
def test_finds_function_with_matching_args(web3, arguments, expected_types):
Contract = web3.vns.contract(abi=MULTIPLE_FUNCTIONS)
abi = Contract._find_matching_fn_abi('a', arguments)
assert abi['name'] == 'a'
assert len(abi['inputs']) == len(expected_types)
assert set(get_abi_input_types(abi)) == set(expected_types)
def test_error_when_duplicate_match(web3):
Contract = web3.vns.contract(abi=MULTIPLE_FUNCTIONS)
with pytest.raises(ValidationError):
Contract._find_matching_fn_abi('a', [100])

# ==== file: setup.py | repo: aaalgo/calign | license: MIT ====
from distutils.core import setup, Extension
calign = Extension('_calign',
language = 'c++',
extra_compile_args = ['-O3', '-std=c++1y'],
include_dirs = ['/usr/local/include'],
libraries = ['opencv_highgui', 'opencv_core', 'boost_filesystem', 'boost_system', 'boost_python'],
library_dirs = ['/usr/local/lib'],
sources = ['python-api.cpp'],
depends = ['calign.h'])
setup (name = 'calign',
version = '0.0.1',
url = 'https://github.com/aaalgo/calign',
author = 'Wei Dong',
author_email = '[email protected]',
license = 'BSD',
description = 'This is a demo package',
ext_modules = [calign],
py_modules = ['picpac.mxnet', 'picpac.neon'],
)
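
# Typical distutils build/install sketch (standard workflow, not from the
# original file):
#
#     python setup.py build_ext --inplace
#     python setup.py install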

# ==== file: faker/providers/job/da_DK/__init__.py | repo: tristanHdez18/faker | license: MIT ====
from .. import Provider as BaseProvider
class Provider(BaseProvider):
"""
Source: https://star.dk/it/saadan-arbejder-vi-med-it-i-styrelsen/oversigt-over-stillingsbetegnelser-og-kvalifikationer/. # NOQA
"""
jobs = [
"Lastvognsmekanikerlærling",
"Knallertmekaniker",
"Møbelarkitekt",
"Forsyningsoperatørelev",
"Hospitalsfysiker",
"Økonomicontroller",
"Revisor",
"Skatterevisor",
"Kontrollør",
"Musikpædagog",
"Pantefoged",
"Serveringsmedarbejder",
"Maskinmesteraspirant",
"Sygehusdirektør",
"Laborant",
"Overlæge",
"Designassistent",
"Teknisk chef",
"Socialformidler",
"Overassistent",
"Pædagogisk assistent",
"Pedel",
"Kustode",
"Pædagogmedhjælper",
"Projektmedarbejder",
"Pedelmedhjælper",
"Museumsbetjent",
"Molekylærbiolog",
"Lærervikar",
"Sognehjælper",
"Lysdesigner",
"Instruktørassistent",
"Teatertekniker",
"Researcher",
"Redaktør",
"Teknisk designer",
"Ressourceleder",
"Indkøbschef",
"E-commerce manager",
"Kontraktchef",
"Produktchef",
"Museumsinspektør",
"Kurator",
"Konservator",
"Modelkonstruktør",
"Kommunikationschef",
"Forskningschef",
"Skovrider",
"Fiskeriteknolog",
"Produktionschef",
"Driftsleder",
"Direktør",
"Officer",
"Sergent",
"IT-ingeniør",
"IT-arkitekt",
"IT-revisor",
"Programmør og systemudvikler",
"UX designer",
"Webredaktør",
"Webudvikler",
"Datakonsulent",
"Idrætsinstruktør og -konsulent",
"Efterretningsofficer",
"Miljøkonsulent",
"Campingpladsbestyrer",
"Miljøkoordinator",
"Grafisk tekniker",
"Elektrotekniker",
"Vindmølleoperatør",
"Urmager",
"Byplanlægger",
"Trafikplanlægger",
"GIS-medarbejder",
"Illustrator",
"Mediegrafiker",
"Artdirector",
"Multimediedesigner",
"Praktiserende læge",
"Speciallæge",
"Struktør",
"Pakkerimedarbejder",
"Cykelbud",
"Fabriksbager",
"Møller",
"Guld- og sølvsmed",
"Ciselør",
"Produktionsleder inden for film og teater",
"Centerleder",
"Lufthavnschef",
"Kameramand",
"Tonemester",
"Studietekniker",
"Eventtekniker",
"Produktionstekniker",
"Fødevareteknolog",
"Brygmester",
"Specialist i biomedicin",
"Botaniker",
"Biokemiker",
"Havbiolog",
"Fysiolog",
"Planteforædler",
"Skoleleder",
"Døvekonsulent",
"Import- og eksportmedarbejder",
"Friskolelærer",
"Au pair",
"Børnepasser",
"Landbrugsmaskinemekaniker",
"Trafikinformationsmedarbejder",
"Togfører",
"Guide",
"Kok",
"Vært i restaurant",
"Tjener",
"Bartender",
"Korrekturlæser",
"Postfunktionær",
"Biblioteksassistent",
"Telefonist",
"Kundeservicemedarbejder",
"Natportier",
"Interviewer",
"Vekselbureaumedarbejder",
"Skattefunktionær",
"Forsikringsfunktionær",
"Revisorassistent",
"Lønbogholder",
"Lagerforvalter",
"Overstyrmand",
"Flyklarerer",
"Marketingmedarbejder",
"Kreativ chef",
"Miljøanalytiker",
"Naturvejleder",
"Procesingeniør",
"Logistiker",
"Bankdirektør",
"Civilingeniør",
"Miljøingeniør",
"Maskiningeniør",
"Værkstedsleder",
"Programdirektør",
"Lystekniker",
"IT-supporter",
"IT-tekniker",
"IT-kvalitetsmedarbejder",
"Korleder",
"Marketingchef",
"Destinationschef",
"Ordblindelærer",
"Kursusleder",
"Produktspecialist",
"Områdechef",
"Rengøringsinspektør",
"Smedelærling",
"Stenhuggerlærling",
"Shippingmedarbejder",
"Lager- og logistikelev",
"Stukkatørlærling",
"Automekanikerlærling",
"Beklædningshåndværkerelev",
"Butikselev",
"Datateknikerelev",
"Industrislagterlærling",
"Shippingassistent",
"Konditorlærling",
"Gulvlæggerlærling",
"Køleteknikerlærling",
"Bygningstruktørlærling",
"Rustfast industrimontør",
"Værktøjsmagerlærling",
"Industriteknikerlærling",
"Vagtcentralassistent",
"Juridisk chef",
"Kunstlærer",
"Lærer på skuespillerskole",
"Asfaltør",
"Jordemoder",
"Erhvervsskolelærer",
"Personalekonsulent",
"Job- og virksomhedskonsulent",
"Tekstforfatter",
"Virksomhedsudvikler",
"Byggeleder",
"Departementschef",
"Politidirektør",
"Diplomat",
"Generalsekretær",
"Leder af offentlig forvaltning",
"Konstabel",
"Speditør",
"Flyttearbejder",
"Lager- og logistikmedarbejder",
"Havnearbejder",
"Anlægsarbejder",
"Slagteriarbejder",
"Fiskeindustriarbejder",
"Industrislagter",
"Slagtermester",
"Bager",
"Konditor",
"Mejeriarbejder",
"Mejerist",
"Familievejleder",
"Socialfaglig leder",
"HR-konsulent",
"SSP-medarbejder",
"Havnefoged",
"Lufthavnsoperatør",
"Assistent til salgssupport",
"Frisør",
"Model",
"Demonstratør",
"Call centermedarbejder",
"Viceskoleleder",
"Ortopædiskomager",
"Fiskeribetjent",
"Indkøber",
"Massageterapeut",
"Levnedsmiddelinspektør",
"Ambulancefører",
"Paramediciner",
"Kunstformidler",
"Arkivar",
"Registrar",
"Bibliotekar",
"Økonom",
"Antropolog",
"Arkæolog",
"Motorcykelmekanikerlærling",
"Skibsmekanikerlærling",
"Landbrugsmaskine-mekanikerlærling",
"VVS-lærling",
"Privatpraktiserende tandplejer",
"Glarmesterlærling",
"Ejendomsserviceteknikerelev",
"Audiologiassistentelev",
"Dyrepasserelev",
"Tømrerlærling",
"Autolakererlærling",
"Bygningsmalerlærling",
"Automatikteknikerelev",
"Skorstensfejerlærling",
"Bagerlærling",
"Vagtcentralmedarbejder",
"Murerlærling",
"Elektrikerlærling",
"Rørlægger",
"Flymekanikerlærling",
"Cykelmekanikerlærling",
"Skibsmontørlærling",
"Bygningssnedkerlærling",
"Studentermedhjælp",
"Redder",
"Chaufførelev",
"Slagterlærling",
"Tagdækkerlærling",
"Organist",
"Sagsbehandler",
"Databaseadministrator",
"Bankrådgiver",
"Realkreditmedarbejder",
"Bogholder",
"Bogholderi- og regnskabsassistent",
"Assurandør",
"Valuar",
"Taksator",
"Hardware-udvikler",
"Medicoingeniør",
"Sensortekniker",
"Boghandler",
"Ekspedient",
"Fiskehandler",
"Farvehandler",
"Blomsterbinder",
"Delikatesseassistent",
"Farmakonom",
"Serviceøkonom",
"SOME-medarbejder",
"Pressesekretær",
"Fundraiser",
"Kampagnemedarbejder",
"Kommunikationskonsulent",
"IT-konsulent",
"IT-direktør",
"IT-chef",
"IT-dokumentationschef",
"Chief data officer",
"IT-projektleder",
"Børne- og ungekoordinator",
"Leder af børne- og ungdomsklub",
"Børsmægler",
"Lagerekspedient",
"Sommelier",
"Levnedsmiddelingeniør",
"Vagt",
"Dørmand",
"Barista",
"Tekster",
"Flyinstruktør",
"Helikopterfører",
"Flymaskinist",
"Klimaforsker",
"Handelsskolelærer",
"Møbelpolstrer",
"Børneværnskonsulent",
"Klargører",
"Klubmedarbejder",
"Kontorchef",
"Koordinator",
"Efterskoleforstander",
"Vicerektor",
"Politisk medarbejder",
"Politisk konsulent",
"Kommunal planlægger",
"Fuldmægtig",
"Rådgivende konsulent",
"Business intelligence manager",
"Økonomiassistent",
"Finansanalytiker",
"Gymnasielærer",
"Folkeskolelærer",
"Pædagog",
"Studiesekretær",
"Speciallærer",
"Fotografmedhjælper",
"Erhvervsdykker",
"Danselærer",
"Geograf",
"Kriminolog",
"Sociolog",
"Historiker",
"Filosof",
"Socialrådgiver",
"Politolog",
"Psykolog",
"Socialarbejder",
"Socialpædagog",
"Præst",
"Geotekniker",
"Svejseinspektør",
"Designer",
"Merchandiser",
"Visual merchandiser",
"Scenograf",
"Tandklinikassistent",
"Tandplejer",
"Keramiker",
"Gravør",
"Kunstner",
"Tegner",
"Garver",
"Landinspektør",
"Byggemontagetekniker",
"Brolægger",
"Forskningsbibliotekar",
"Anlægsgartnerarbejder",
"Cafemedarbejder",
"Kontorleder",
"Farmakonomelev",
"Rejsebureauelev",
"Tandplejerelev",
"Tandteknikerelev",
"Frisørelev",
"Receptionistelev",
"Vejrvært",
"Arrangementchef",
"Udviklingschef",
"Indretningsarkitekt",
"Autoteknolog",
"Butiksassistent",
"Skolepædagog",
"Social- og sundhedsassistent",
"Social- og sundhedshjælper",
"Kasseassistent",
"Levnedsmiddeltekniker",
"Maskinsnedker",
"Møbelsnedker",
"Automationsingeniør",
"Produktionsmedarbejder",
"Byggetekniker",
"Reklamechef",
"Sproglærer",
"Tegnsprogslærer",
"Energiingeniør",
"Dagtilbudsleder",
"Vuggestueleder",
"Plejehjemsleder",
"Kommunikationskoordinator",
"Brandchef",
"Flysikkerhedschef",
"Miljø- og sikkerhedschef",
"Bibliotekschef",
"Museumsleder",
"Kunstnerisk leder",
"Kundeservicechef",
"Rigsarkivar",
"Flymekaniker",
"Skibsmekaniker",
"Entreprenørmaskinemekaniker",
"Kranmekaniker",
"Industrimekaniker",
"Cykelmekaniker",
"Skorstensfejer",
"Industrilakerer",
"Autolakerer",
"Murer",
"Stenhugger",
"Betonmager",
"Køkkenmontør",
"Tømrer",
"Skov- og naturteknikerelev",
"Lægemiddelkonsulent",
"Bevægelsespædagog",
"Ernæringsassistent",
"Ungdomsskolelærer",
"PAU-elev",
"IT-underviser",
"VUC-lærer",
"Uddannelses- og erhvervsvejleder",
"Finansrådgiver",
"Investeringsrådgiver",
"Musiklærer",
"Hotelchef",
"Butikschef",
"Regionschef",
"Teaterteknikerelev",
"Speditørelev",
"IT-supporterelev",
"Politielev",
"Vindmølleoperatørelev",
"Gartnerelev",
"Ortopædielev",
"Fotografelev",
"Film- og tvproduktionselev",
"Procesoperatørelev",
"Optikerelev",
"Radio- og TV-fagteknikerelev",
"Handelselev",
"Elektronikoperatørelev",
"Toldelev",
"Plastmagerelev",
"Social- og sundhedshjælperelev",
"Grafikerelev",
"Forsikringselev",
"Revisorelev",
"Shippingelev",
"Regnskabselev",
"Tjenerelev",
"Finmekanikerelev",
"Oliefyrsteknikerelev",
"Urmagerelev",
"Redderelev",
"Teleteknikerelev",
"Industrioperatørelev",
"Landbrugselev",
"Kosmetologelev",
"Asfaltørelev",
"Kontorelev",
"Fitnessinstruktørelev",
"Møbelsnedkerelev",
"Serviceassistentelev",
"Mejerielev",
"Ernæringsassistentelev",
"Neurofysiologiassistentelev",
"Kostumier",
"Buntmager",
"Parykmager",
"Skrædder",
"Skomager",
"Bore- og udvindingsarbejder",
"Offshorearbejder",
"Ordrebehandler",
"Reservedelsekspedient",
"Oldfrue",
"Vicevært",
"Ledsager",
"Bedemandsassistent",
"Bedemand",
"Graver",
"Kosmetolog",
"Stylist",
"Negletekniker",
"Massør",
"Tekstildesigner",
"Kostumedesigner",
"Institutleder på universitet",
"Rektor",
"Information- og videnchef IT",
"Dokumentationsmedarbejder",
"Efterskolelærer",
"Dagplejer",
"Yogalærer",
"Dommerfuldmægtig",
"Hotelmedarbejder",
"Dagplejeleder",
"Kordegn",
"FGU-lærer",
"Værkstedsassistent",
"Økonoma",
"Artist",
"Væksthusgartner",
"Gartneriarbejder",
"Chef for gartneriproduktion",
"Anlægsgartner",
"Staldmester",
"Støberitekniker",
"Skibsbygger",
"Svejser",
"Klejnsmed",
"Laboratorieleder",
"Skovfoged",
"Fiskeassistent",
"Fisker",
"Skytte",
"Landmand",
"Pelsdyravler",
"Fængselsfunktionær",
"Livredder",
"Nødhjælpsarbejder",
"Parkeringsvagt",
"Kældermester",
"Fødevareinspektør",
"Grossist",
"Varemægler",
"Skibsfører",
"Lods",
"Skibsmaskinist",
"Maskinmester",
"Maskinassistent",
"Mejeritekniker",
"Produktionsteknolog",
"Produktionsleder",
"Værkfører",
"Fysiker",
"Astronom",
"Metrolog",
"Meteorolog",
"Kemiker",
"Geofysiker",
"Geolog",
"Statistiker",
"Aktuar",
"Demograf",
"Matematiker",
"Farmakolog",
"Biolog",
"Skovbrugsrådgiver",
"Landbrugskonsulent",
"Agronom",
"Sagsadministrator",
"Detektiv",
"Kontormedhjælper",
"Sekretær",
"Tasteoperatør",
"Bankassistent",
"Croupier",
"Av tekniker",
"Tekniker radio- og TV-udsendelser",
"Webmaster",
"Garderobeassistent",
"Butiksdetektiv",
"Beklædningsdesigner",
"Psykoterapeut",
"Klinisk psykolog",
"Produktionsingeniør",
"Regionsdirektør",
"Havearkitekt",
"Salgs- og kundeansvarlig",
"Systemadministrator",
"IT-sikkerhedskonsulent",
"Eventmanager",
"Eventassistent",
"Ejendomsadministrator",
"Ejendomsmægler",
"Reklamekonsulent",
"Auktionsleder",
"Musiker",
"Danser",
"Koreograf",
"Kirketjener",
"Driftschef",
"Chefkonsulent",
"Turismechef",
"Brandinspektør",
"Testingeniør",
"Materialetekniker",
"Kemiingeniør",
"Økonomichef",
"Cykelhandler",
"Bagermester",
"Politifuldmægtig",
"Musikterapeut",
"Kvalitetsingeniør",
"Hundetræner",
"Beslagsmed",
"Teatermedarbejder",
"Scenefunktionær",
"Sikkerhedschef",
"Plade- og konstruktionssmed",
"Smed",
"Finmekaniker",
"Værktøjsmager",
"Modelsnedker",
"Låsesmed",
"Hundefører",
"Medarbejder på et dyreinternat",
"Kørelærer",
"Instrumentbygger",
"Lydtekniker",
"Tandklinikassistentelev",
"Museumsmedhjælper",
"Bådebyggerlærling",
"Teknisk isolatørelev",
"VVS-montør",
"Blikkenslager",
"Galvanisør",
"Bådebygger",
"Lastvognsmekaniker",
"Knallertmekanikerlærling",
"Laboratorietekniker",
"Skibsmontør",
"Manuskriptforfatter",
"Teknisk kommunikator",
"Vulkanisør",
"Veterinærsygeplejerske",
"Inseminør",
"Drejer",
"CNC-operatør",
"Jern- og metalsliber",
"Karosserismed",
"Automekaniker",
"Dækmontør",
"Mekaniker",
"Filmklipper",
"Producer",
"Skuespiller",
"Jordbrugsteknolog",
"Miljøtekniker",
"Kort- og landmålingstekniker",
"Fræser",
"Transportchef",
"Porcelænsmaler",
"Robottekniker",
"Personalechef",
"Programchef",
"Chefstrateg",
"Facility manager",
"Administrationschef",
"Kvalitetschef",
"Kontorfuldmægtig",
"Advokatsekretær",
"Direktionssekretær",
"Redaktionssekretær",
"Lægesekretær",
"Administrativ lægesekretær",
"Tolder",
"Teletekniker",
"Elektrofagtekniker",
"Pottemager",
"Glarmester",
"Glasmager",
"Skiltemaler",
"Klaverstemmer",
"Kranfører",
"Truckfører",
"Sadelmager",
"Trykkeriarbejder",
"Tekstiltrykker",
"Elektriker",
"Dirigent",
"Korsanger",
"Ligestillings- og inklusionschef",
"Akupunktør",
"Orgelbygger",
"Personlig træner",
"Forlystelsesmedarbejder",
"Renseriassistent",
"Redaktionschef",
"Distributionschef",
"Lagerchef",
"Import- og eksportchef",
"Supply chain manager",
"Elektronikarbejder",
"Lokomotivfører",
"Togklargører",
"Taxichauffør",
"Risikoanalytiker",
"Værdipapiranalytiker",
"Forsikringsanalytiker",
"Investeringsanalytiker",
"Sceneinstruktør",
"Caster",
"Filminstruktør",
"Stilladsmontør",
"Nedriver",
"Brandmand",
"Tagdækker",
"Stukkatør",
"Isolatør",
"Kloakrørlægger",
"Kloakmester",
"VVS-installatør",
"Gastekniker",
"HVAC-tekniker",
"Arbejdsmiljøkonsulent",
"Fysioterapeut",
"Ernærings- og sundhedskonsulent",
"Audiolog",
"Logopæd",
"Øjenlæge",
"Radiograf",
"Kiropraktor",
"Ergoterapeut",
"Elektroingeniør",
"Fodterapeut",
"Alternativ behandler",
"Lektor",
"Oversygeplejerske",
"Specialsygeplejerske",
"Sygeplejerske",
"Elektronikingeniør",
"Telekommunikationsingeniør",
"Arkitekt",
"Landskabsarkitekt",
"Industriel designer",
"Vaskeriassistent",
"Bryggeriarbejder",
"Datalog",
"Psykiatrisk sygeplejerske",
"Ortopædist",
"Designteknolog",
"Skibsmægler",
"Medicotekniker",
"Finanschef",
"Regnskabschef",
"Filmfotograf",
"Guitarbygger",
"Frisør inden for teater- og TV-branchen",
"Hundefrisør",
"Skilærer",
"Misbrugsbehandler",
"VVS-tekniker",
"Maskinkonstruktør",
"Skibsingeniør",
"Lægesekretærelev",
"Social- og sundhedsassistentelev",
"Filmklipperelev",
"Laborantelev",
"Elektronikfagtekniker-elev",
"Finanselev",
"Kokkeelev",
"Guld- og sølvsmedelev",
"Maskinsnedkerelev",
"Teknisk designerelev",
"Uddannelsesleder",
"Kørselsleder",
"Greenkeeper",
"Kunsthåndværker",
"Neurofysiologiassistent",
"Ministerialbetjent",
"Sekretariatschef",
"Plejehjemsmedhjælper",
"Psykomotorisk terapeut",
"Sundhedsplejerske",
"TV-producer",
"Sejlmager",
"Smørrebrødsjomfru",
"Sanglærer",
"Rengøringsassistent",
"Vinduespudser",
"Bademester",
"Maskinfører",
"Buschauffør",
"Procesoperatør",
"Bygningsmaler",
"Flisemontør",
"Gulvlægger",
"Tæppemontør",
"Hospitalsserviceassistent",
"Arkivmedarbejder",
"HR-assistent",
"Korrespondent",
"Purser",
"Rideskoleassistent",
"Dyrepasser",
"Køkkenmedhjælper",
"Opvasker",
"Omdeler",
"Renovationsarbejder",
"Gadefejer",
"Måleraflæser",
"Pizzabager",
"Fastfood ekspedient",
"Butiksmedhjælper",
"Landbrugsmedhjælper",
"Gartner",
"Skovarbejder",
"Dambrugsarbejder",
"Politiinspektør",
"Speditionsleder",
"Bygningsingeniør",
"Energikonsulent",
"Elektronikfagtekniker",
"Lingvist",
"Tegnsprogstolk",
"Oversætter",
"Tolk",
"Journalist",
"Dramaturg",
"Forlagsredaktør",
"Advokat",
"Dommer",
"Notar",
"Jurist",
"Produktudvikler",
"Industritekniker",
"Laboratorieassistent",
"Biomediciner",
"Apotekerassistent",
"Apotekstekniker",
"Ortopædiingeniør",
"Klinisk tandtekniker",
"Dyreklinikassistent",
"Flyveleder",
"Flyveklarerer",
"Pilot",
"Rejsekonsulent",
"Trafikassistent",
"Billetsælger",
"Rejsebureaumedarbejder",
"Medarbejder på turistkontor",
"Inkassomedarbejder",
"Specialtandlæge",
"Dyrlæge",
"Tandlæge",
"Apoteker",
"CSR-ansvarlig",
"Projektleder",
"Afdelingsleder",
"Salgsdirektør",
"Bygningskonstruktør",
"Afdelingschef",
"Fodermester",
"Havneassistent",
"Farmaceut",
"Artdirector assistent",
"Professor",
"Faglærer",
"Automontør",
"Familieplejer",
"Blomsterdekoratør",
"Sundhedsøkonom",
"Bilsynsassistent",
"Badeassistent",
"Businesscontroller",
"Specialkonsulent",
"FVU-lærer",
"Bageriarbejder",
"Ridelærer",
"Fitness-instruktør",
"Optiker",
"Fotograf",
"Fotojournalist",
"Køkkenchef",
"Regissør",
"Salgschef",
"Elinstallatør",
"Skolekonsulent",
"Læge",
"Byggesagsbehandler",
"Økologikonsulent",
"Restaurantchef",
"Cater",
"Adjunkt",
"Faglig konsulent",
"Forsorgsmedarbejder",
"Pædagogisk konsulent",
"Sygehuslæge",
"Hospitalsmedhjælper",
"Kirkesanger",
"Kantineleder",
"Fagkonsulent",
"Handicaphjælper",
"Aftenskolelærer",
"Projektkoordinator",
"Ligestillingskonsulent",
"Brolæggerarbejde",
"Bygningsstruktør",
"Oliefyrstekniker",
"Motorcykelmekaniker",
"Buschaufførelev",
"Minkfarmmedhjælper",
"Procesteknolog",
"Rengøringsassistent i transportmidler",
"Butiks- og detailslagter",
"Audiologiassistent",
"Skiltemalerlærling",
"Rengøringsassistent i kontor",
"Ventilationstekniker",
"Skibsassistent",
"AV teknikerelev",
"Højskolelærer",
"Detailhandelselev",
"Forretningsudvikler",
"Vinkyper",
"Kulturmedarbejder",
"Zoneterapeut",
"Styrmand",
"Turistchef",
"Anæstesisygeplejerske",
"Støttepædagog",
"Salgskonsulent",
"Eventkoordinatorelev",
"Bygningssnedker",
"Finansmedarbejder",
"Kursuskoordinator",
"Automatiktekniker",
"Bioanalytiker",
"Klubpædagog",
"Vagtcentralleder",
"Flyteknikner",
"Forsyningsoperatør",
"Account manager",
"Datatekniker",
"Logistikchef",
"Tale-hørelærer",
"Plastmager",
"IT-produktchef",
"Erhvervsanalytiker",
"Halinspektør",
"Maskinoperatør",
"Kommunikationsmedarbejder",
"Anlægsstruktør",
"Filmtekniker",
"Elektronikfagteknikerelev",
"Servicetekniker",
"Mejeriingeniør",
"Poder",
"Advokatfuldmægtig",
"Omsorgshjælper",
"Kvalitetsmedarbejder",
"Forlagskonsulent",
"Flyteknikerlærling",
"Skov- og naturtekniker",
"Skolesekretær",
"IT-produktejer",
"Kontorassistent",
"Udviklingskonsulent",
"Pædagogisk faglig koordinator",
]
| 26.158879 | 132 | 0.534596 |
| true | true |
1c4a43a62d1b0c7b36182ef5fdb89f137fc4a846 | 11,479 | py | Python | onmt/translate/greedy_search.py | comydream/OpenNMT-py | 2f3c810069ca03b752d9886782648e576b39a06d | [
"MIT"
] | 1 | 2021-10-01T15:03:35.000Z | 2021-10-01T15:03:35.000Z | onmt/translate/greedy_search.py | urialon/OpenNMT-py | bdca05a3fac8f864b21c86a8ad03c09895212e70 | [
"MIT"
] | null | null | null | onmt/translate/greedy_search.py | urialon/OpenNMT-py | bdca05a3fac8f864b21c86a8ad03c09895212e70 | [
"MIT"
] | null | null | null | import torch
import torch.nn.functional as F
from onmt.translate.decode_strategy import DecodeStrategy
def sample_topp(logits, keep_topp):
sorted_logits, sorted_indices = torch.sort(logits,
descending=True,
dim=1)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits,
dim=-1), dim=-1)
sorted_indices_to_keep = cumulative_probs.lt(keep_topp)
# keep indices until overflowing p
cumsum_mask = sorted_indices_to_keep.cumsum(dim=1)
last_included = cumsum_mask[:, -1:]
last_included.clamp_(0, sorted_indices_to_keep.size()[1] - 1)
sorted_indices_to_keep = sorted_indices_to_keep.scatter_(
1, last_included, 1)
# Set all logits that are not in the top-p to -10000.
# This puts the probabilities close to 0.
keep_indices = sorted_indices_to_keep.scatter(
1,
sorted_indices,
sorted_indices_to_keep,
)
return logits.masked_fill(~keep_indices, -10000)
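# Illustrative sketch (not part of the original module): with keep_topp=0.9,
# tokens are kept in descending-probability order until their cumulative
# softmax mass first exceeds 0.9; the rest get logit -10000 and are (almost)
# never sampled. For example:
#   logits = torch.tensor([[2.0, 1.0, 0.1, -1.0]])   # probs ~ [.64, .24, .10, .03]
#   sample_topp(logits, keep_topp=0.9)               # only the last token is masked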
def sample_topk(logits, keep_topk):
top_values, _ = torch.topk(logits, keep_topk, dim=1)
kth_best = top_values[:, -1].view([-1, 1])
kth_best = kth_best.repeat([1, logits.shape[1]]).float()
# Set all logits that are not in the top-k to -10000.
# This puts the probabilities close to 0.
ignore = torch.lt(logits, kth_best)
return logits.masked_fill(ignore, -10000)
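# Illustrative sketch (not part of the original module): with keep_topk=2 the
# k-th best logit becomes a per-row threshold, and everything below it is
# pushed to -10000:
#   logits = torch.tensor([[2.0, 1.0, 0.1, -1.0]])
#   sample_topk(logits, keep_topk=2)   # -> [[2.0, 1.0, -10000.0, -10000.0]]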
def sample_with_temperature(logits, sampling_temp, keep_topk, keep_topp):
"""Select next tokens randomly from the top k possible next tokens.
Samples from a categorical distribution over the ``keep_topk`` words using
the category probabilities ``logits / sampling_temp``.
Args:
logits (FloatTensor): Shaped ``(batch_size, vocab_size)``.
These can be logits (``(-inf, inf)``) or log-probs (``(-inf, 0]``).
(The distribution actually uses the log-probabilities
``logits - logits.logsumexp(-1)``, which equals the logits if
they are log-probabilities summing to 1.)
sampling_temp (float): Used to scale down logits. The higher the
value, the more likely it is that a non-max word will be
sampled.
keep_topk (int): This many words could potentially be chosen. The
other logits are set to have probability 0.
        keep_topp (float): Keep the most likely words until their cumulative
            probability exceeds ``p``. If used together with ``keep_topk``,
            both conditions are applied.
Returns:
(LongTensor, FloatTensor):
* topk_ids: Shaped ``(batch_size, 1)``. These are
the sampled word indices in the output vocab.
* topk_scores: Shaped ``(batch_size, 1)``. These
are essentially ``(logits / sampling_temp)[topk_ids]``.
"""
if sampling_temp == 0.0 or keep_topk == 1:
# For temp=0.0, take the argmax to avoid divide-by-zero errors.
# keep_topk=1 is also equivalent to argmax.
topk_scores, topk_ids = logits.topk(1, dim=-1)
if sampling_temp > 0:
topk_scores /= sampling_temp
else:
logits = torch.div(logits, sampling_temp)
if keep_topp > 0:
logits = sample_topp(logits, keep_topp)
if keep_topk > 0:
logits = sample_topk(logits, keep_topk)
dist = torch.distributions.Categorical(logits=logits)
topk_ids = dist.sample().view(-1, 1)
topk_scores = logits.gather(dim=1, index=topk_ids)
return topk_ids, topk_scores
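# Usage sketch (values are illustrative): sampling_temp=0.0 reduces to argmax,
# while higher temperatures flatten the distribution before sampling:
#   ids, scores = sample_with_temperature(log_probs, sampling_temp=0.0,
#                                         keep_topk=-1, keep_topp=-1)  # greedy
#   ids, scores = sample_with_temperature(log_probs, sampling_temp=1.5,
#                                         keep_topk=10, keep_topp=-1)  # top-10 sampling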
class GreedySearch(DecodeStrategy):
"""Select next tokens randomly from the top k possible next tokens.
The ``scores`` attribute's lists are the score, after applying temperature,
of the final prediction (either EOS or the final token in the event
that ``max_length`` is reached)
Args:
pad (int): See base.
bos (int): See base.
eos (int): See base.
unk (int): See base.
batch_size (int): See base.
global_scorer (onmt.translate.GNMTGlobalScorer): Scorer instance.
min_length (int): See base.
max_length (int): See base.
ban_unk_token (Boolean): See base.
block_ngram_repeat (int): See base.
exclusion_tokens (set[int]): See base.
return_attention (bool): See base.
max_length (int): See base.
sampling_temp (float): See
:func:`~onmt.translate.greedy_search.sample_with_temperature()`.
keep_topk (int): See
:func:`~onmt.translate.greedy_search.sample_with_temperature()`.
keep_topp (float): See
:func:`~onmt.translate.greedy_search.sample_with_temperature()`.
beam_size (int): Number of beams to use.
"""
def __init__(self, pad, bos, eos, unk, batch_size, global_scorer,
min_length, block_ngram_repeat, exclusion_tokens,
return_attention, max_length, sampling_temp, keep_topk,
keep_topp, beam_size, ban_unk_token):
super(GreedySearch, self).__init__(
pad, bos, eos, unk, batch_size, beam_size, global_scorer,
min_length, block_ngram_repeat, exclusion_tokens,
return_attention, max_length, ban_unk_token)
self.sampling_temp = sampling_temp
self.keep_topk = keep_topk
self.keep_topp = keep_topp
self.topk_scores = None
self.beam_size = beam_size
def initialize(self, memory_bank, src_lengths, src_map=None, device=None,
target_prefix=None):
"""Initialize for decoding."""
(fn_map_state, memory_bank,
src_map, target_prefix) = self.initialize_tile(
memory_bank, src_lengths, src_map, target_prefix)
if device is None:
device = self.get_device_from_memory_bank(memory_bank)
super(GreedySearch, self).initialize(
memory_bank, src_lengths, src_map, device, target_prefix)
self.select_indices = torch.arange(
self.batch_size*self.beam_size, dtype=torch.long, device=device)
self.original_batch_idx = fn_map_state(torch.arange(
self.batch_size, dtype=torch.long, device=device), dim=0)
self.beams_scores = torch.zeros((self.batch_size*self.beam_size, 1),
dtype=torch.float, device=device)
return fn_map_state, memory_bank, self.memory_lengths, src_map
@property
def current_predictions(self):
return self.alive_seq[:, -1]
@property
def batch_offset(self):
return self.select_indices
def _pick(self, log_probs):
"""Function used to pick next tokens.
Args:
log_probs (FloatTensor): ``(batch_size, vocab_size)``.
"""
# maybe fix some prediction at this step by modifying log_probs
log_probs = self.target_prefixing(log_probs)
topk_ids, topk_scores = sample_with_temperature(
log_probs, self.sampling_temp, self.keep_topk, self.keep_topp)
return topk_ids, topk_scores
def align_select_indices(self):
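        # If some sequences finished on the previous step the alive batch
        # shrank, so rebuild select_indices to index the new, smaller batch.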
nb_finished_beams = (self.is_finished.view(-1).size(0) -
self.select_indices.size(0))
if nb_finished_beams:
self.select_indices = torch.arange(
self.select_indices.size(0), dtype=torch.long,
device=self.select_indices.device)
def advance(self, log_probs, attn):
"""Select next tokens randomly from the top k possible next tokens.
Args:
log_probs (FloatTensor): Shaped ``(batch_size, vocab_size)``.
These can be logits (``(-inf, inf)``) or log-probs
(``(-inf, 0]``). (The distribution actually uses the
log-probabilities ``logits - logits.logsumexp(-1)``,
which equals the logits if they are log-probabilities summing
to 1.)
attn (FloatTensor): Shaped ``(1, B, inp_seq_len)``.
"""
self.align_select_indices()
self.ensure_min_length(log_probs)
self.ensure_unk_removed(log_probs)
self.block_ngram_repeats(log_probs)
topk_ids, self.topk_scores = self._pick(log_probs)
self.beams_scores += self.topk_scores
self.is_finished = topk_ids.eq(self.eos)
self.alive_seq = torch.cat([self.alive_seq, topk_ids], -1)
if self.return_attention:
if self.alive_attn is None:
self.alive_attn = attn
else:
self.alive_attn = torch.cat([self.alive_attn, attn], 0)
self.ensure_max_length()
def update_finished(self):
"""Finalize scores and predictions."""
        # shape: (sum(self.is_finished), 1)
finished_batches = self.is_finished.view(-1).nonzero(as_tuple=False)
step = len(self)
length_penalty = self.global_scorer.length_penalty(
step, alpha=self.global_scorer.alpha)
for b in finished_batches.view(-1):
b_orig = self.original_batch_idx[b]
score = self.beams_scores[b, 0]/length_penalty
pred = self.alive_seq[b, 1:]
attention = (
self.alive_attn[:, b, :self.memory_lengths[b]]
if self.alive_attn is not None else [])
self.hypotheses[b_orig].append((score, pred, attention))
self.done = self.is_finished.all()
if self.done:
for b in range(self.batch_size):
best_hyp = sorted(
self.hypotheses[b], key=lambda x: x[0], reverse=True)
for score, pred, attn in best_hyp:
self.scores[b].append(score)
self.predictions[b].append(pred)
self.attention[b].append(attn)
return
is_alive = ~self.is_finished.view(-1)
self.alive_seq = self.alive_seq[is_alive]
self.beams_scores = self.beams_scores[is_alive]
if self.alive_attn is not None:
self.alive_attn = self.alive_attn[:, is_alive]
self.select_indices = is_alive.nonzero(as_tuple=False).view(-1)
self.original_batch_idx = self.original_batch_idx[is_alive]
self.maybe_update_target_prefix(self.select_indices)
class GreedySearchLM(GreedySearch):
def update_finished(self):
super(GreedySearchLM, self).update_finished()
self.update_memory_lengths()
def update_memory_lengths(self):
is_alive = ~self.is_finished.view(-1)
self.memory_lengths = self.memory_lengths[is_alive]
def advance(self, log_probs, attn):
super(GreedySearchLM, self).advance(log_probs, attn)
        # In the LM task, memory_lengths tracks the currently generated src,
        # so it has to grow along with the generation.
self.memory_lengths += 1
def initialize(self, src, src_lengths, src_map=None, device=None,
target_prefix=None):
"""Initialize for decoding."""
if device is None:
device = src.device
(fn_map_state, _, self.memory_lengths,
src_map) = super(GreedySearchLM, self).initialize(
None, src_lengths, src_map, device, target_prefix)
src = fn_map_state(src, dim=1)
return fn_map_state, src, self.memory_lengths, src_map
| 40.996429 | 79 | 0.623748 |
| true | true |
1c4a44bbf72471eab126ed6a6523fab4eb11bffa | 1,995 | py | Python | efm_example.py | xurong-liang/cornac | 6e0a58b3c99de8c1bd685086c8a63b29aef66e28 | [
"Apache-2.0"
] | null | null | null | efm_example.py | xurong-liang/cornac | 6e0a58b3c99de8c1bd685086c8a63b29aef66e28 | [
"Apache-2.0"
] | null | null | null | efm_example.py | xurong-liang/cornac | 6e0a58b3c99de8c1bd685086c8a63b29aef66e28 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example for Explicit Factor Models"""
import cornac
from cornac.datasets import amazon_toy
from cornac.data import SentimentModality
from cornac.eval_methods import RatioSplit
# Load rating and sentiment information
rating = amazon_toy.load_feedback()
sentiment = amazon_toy.load_sentiment()
# Instantiate a SentimentModality, it makes it convenient to work with sentiment information
md = SentimentModality(data=sentiment)
# Define an evaluation method to split feedback into train and test sets
split_data = RatioSplit(
data=rating,
test_size=0.15,
exclude_unknowns=True,
verbose=True,
sentiment=md,
seed=123,
)
# Instantiate the EFM model
efm = cornac.models.EFM(
num_explicit_factors=40,
num_latent_factors=60,
num_most_cared_aspects=15,
rating_scale=5.0,
alpha=0.85,
lambda_x=1,
lambda_y=1,
lambda_u=0.01,
lambda_h=0.01,
lambda_v=0.01,
max_iter=100,
num_threads=1,
trainable=True,
verbose=True,
seed=123,
)
# Instantiate evaluation metrics
rmse = cornac.metrics.RMSE()
ndcg_50 = cornac.metrics.NDCG(k=50)
auc = cornac.metrics.AUC()
# Put everything together into an experiment and run it
experiment = cornac.Experiment(
eval_method=split_data, models=[efm], metrics=[rmse, ndcg_50, auc]
)
experiment.run()
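# When the run completes, Cornac prints a summary table of the configured
# metrics (RMSE, NDCG@50, AUC) for each model on the held-out test split.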
| 28.098592 | 92 | 0.717293 |
| true | true |
1c4a45973be19ede8307caaba8c7bcb5c4ecdae9 | 994 | py | Python | config/urls.py | lawiz22/PLOUC-Backend-master | b93fa2fea8d45df9f19c3c58037e59dad4981921 | [
"MIT"
] | null | null | null | config/urls.py | lawiz22/PLOUC-Backend-master | b93fa2fea8d45df9f19c3c58037e59dad4981921 | [
"MIT"
] | 3 | 2020-06-05T21:24:34.000Z | 2022-03-11T23:50:26.000Z | config/urls.py | lawiz22/PLOUC-Backend-master | b93fa2fea8d45df9f19c3c58037e59dad4981921 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from rest_framework.documentation import include_docs_urls
urlpatterns = [
# API (v1)
url(r'^', include('v1.accounts.urls')),
url(r'^', include('v1.credits.urls')),
url(r'^', include('v1.posts.urls')),
url(r'^', include('v1.music.urls')),
url(r'^', include('v1.private_messages.urls')),
url(r'^', include('v1.replies.urls')),
url(r'^', include('v1.user_roles.urls')),
url(r'^', include('v1.votes.urls')),
# Core
url(r'^admin/', admin.site.urls),
url(r'^', include_docs_urls(title='PLOUC.LIVE API')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
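# Note: static() only emits these patterns when settings.DEBUG is True; in
# production, media and static files should be served by the web server.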
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| 28.4 | 78 | 0.677062 |
| true | true |
1c4a45acae68baad54b3c997b9a3965d7f7d11af | 13,496 | py | Python | arch/models/model.py | bigphoton/arch | 95a197d6b89bc2316b0d88b2b1345cbbb90088ec | [
"Unlicense"
] | null | null | null | arch/models/model.py | bigphoton/arch | 95a197d6b89bc2316b0d88b2b1345cbbb90088ec | [
"Unlicense"
] | null | null | null | arch/models/model.py | bigphoton/arch | 95a197d6b89bc2316b0d88b2b1345cbbb90088ec | [
"Unlicense"
] | null | null | null | """
Functions and objects describing optical components.
"""
import abc
import sympy
from ..connectivity import Connectivity
import arch.port as port
import numpy as np
import math
class Model(abc.ABC):
"""
Model base class. One of `block` or `ports` must be defined.
name: name of this model for indexing, string
block: block of which this model is part (optional)
ports: ports connected to this model (optional)
kwargs: keyword argument dict passed to subclass Model.define method
"""
def __init__(self, name, block=None, ports=None, **kwargs):
self.name = name
if block is not None and ports is None:
self.ports = list(block.ports)
elif ports is not None and block is None:
self.ports = list(ports)
elif ports is not None and block is not None:
raise AttributeError("One and only one of either `block` or `ports` "
"may be set.")
else:
self.ports = list()
self.__properties = set()
self.define(**kwargs)
@classmethod
def compound(cls, name, models, connectivity):
"""
Method to be implemented by subclasses. Subclasses should call the `compound` method
of `super` if they are unable to compound the input models (see snippet below).
try:
<Subclass compounding code here>
except NotImplementedError:
return super().compound(name=name, models=models, connectivity=connectivity)
"""
print ("Compounding in Model")
return NumericModel.compound(name, models, connectivity)
@property
def lineage(self):
"""
Return list of models in this model's chain of inheritance.
"""
def list_bases(c):
if issubclass(c, Model):
all_bases = [c]
for base in c.__bases__:
all_bases.extend(list_bases(base))
return all_bases
else:
return []
return list_bases(self.__class__)
@property
def properties(self):
"""
List of properties of model which change how the model is compounded or simulated.
Properties (list elements) should normally be strings.
"""
return self.__properties
@property
def in_ports(self):
return [p for p in self.ports if p.direction == port.direction.inp]
@property
def out_ports(self):
return [p for p in self.ports if p.direction == port.direction.out]
@abc.abstractmethod
def define(self, **kwargs):
"""
Method overridden by subclasses to implement the model. kwargs are
passed directly from __init__.
"""
pass
@property
def port_names(self):
return {str(e) for e in self.ports}
@property
def default_input_state(self):
"""Dictionary of default values keyed by input port"""
return {p:p.default for p in self.in_ports}
def __repr__(self):
return "<"+self.__class__.__module__+"."+self.__class__.__name__+" '"+self.name+"'>"
class NumericModel(Model):
"""
General numeric model.
out_func: function of dict keyed by input ports, returning dict keyed by output ports
"""
def define(self, out_func=lambda x:x, **kwargs):
self.properties.add("numeric")
		described_out_ports = set(out_func(self.default_input_state).keys())
if not set(self.out_ports).issubset(described_out_ports):
print(self.out_ports)
print(described_out_ports)
raise AttributeError("Model output ports do not match ports"
" described by out_func. "
"Ports missing from `out_func` are {:}. ".format(
[p for p in self.out_ports if p not in described_out_ports]) )
self.out_func = out_func
@classmethod
def compound(cls, name, models=[], connectivity=Connectivity()):
print("Compounding in NumericalModel")
# Filter the connectivity to only cover these models
connectivity = connectivity.filtered_by_models(models)
# Get ports from models
ports = [p for m in models for p in m.ports]
# Filter external ports
ex_ports = [p for p in ports if p not in connectivity]
ex_out_ports = [p for p in ex_ports if p.direction == port.direction.out]
def _have_prereqs(model, state):
"""Does `state` contain all the prerequisite inputs for `model`"""
return all([p in state for p in model.in_ports])
def out_func(state):
mods = set(models)
# Initialise ports within loops
loops = connectivity.loops
state = {e:e.default for l in loops for e in l if isinstance(e, port.var)} | state
# Substitute ready model values until all models are substituted
while mods:
ready_mods = {mod for mod in mods if _have_prereqs(mod, state)}
for mod in ready_mods:
state |= mod.out_func(state)
state |= {pi:state[po] for po,pi in connectivity if po in state}
mods -= ready_mods
return state
return NumericModel(name=name, ports=ex_ports, out_func=out_func)
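# Minimal usage sketch (hypothetical ports; `port.var` construction details
# are omitted here):
#   m = NumericModel("doubler", ports=[inp, outp],
#                    out_func=lambda state: {outp: 2*state[inp]})
#   m.out_func({inp: 3.0})   # -> {outp: 6.0}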
class SymbolicModel(Model):
"""
General symbolic model.
"""
def define(self, out_exprs=None, **kwargs):
self.properties.add("symbolic")
if out_exprs is not None:
self.out_exprs = out_exprs
@property
def out_exprs(self):
return self.__out_exprs
@out_exprs.setter
def out_exprs(self, new_out_exprs):
self.__out_exprs = new_out_exprs
# Refresh out_funcs
try:
self._out_func_lambda = sympy.lambdify(self.in_ports,
[self.out_exprs[p] for p in self.out_ports])
except KeyError as e:
raise KeyError(f"Output port '{e}' not described by `out_exprs` {self.out_exprs}")
def out_func(self, in_state):
"""
Compute output state from input state.
in_state: dictionary of port values keyed by port
return: dictionary of port values (including outputs) keyed by port
"""
# Since our lambda func (and sympy.lambdify) deals in arg *vectors*, derive them from the
# input dict, and derive the output dict from them.
in_state_vec = [in_state[p] for p in self.in_ports]
out_state_vec = self._out_func_lambda(*in_state_vec)
#out_state_dict = in_state | {self.out_ports[i]:out_state_vec[i] for i in range(len(out_state_vec))}
out_state_dict = {self.out_ports[i]:out_state_vec[i] for i in range(len(out_state_vec))}
return out_state_dict
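	# Sketch of the sympy round-trip this relies on (symbols are illustrative):
	#   x = sympy.Symbol('x'); f = sympy.lambdify([x], [x**2 + 1]); f(3) -> [10]
	# Here the ports themselves are the sympy symbols, so the state dicts are
	# converted to and from the positional vectors that lambdify expects.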
@classmethod
def compound(cls, name, models=[], connectivity=Connectivity(), iter_max=10):
try:
# Filter the connectivity to only cover these models
connectivity = connectivity.filtered_by_models(models)
# Get ports from models
ports = [p for m in models for p in m.ports]
# Filter external ports
ex_ports = [p for p in ports if p not in connectivity]
ex_out_ports = [p for p in ex_ports if p.direction == port.direction.out]
ex_in_ports = [p for p in ex_ports if p.direction == port.direction.inp]
def _have_prereqs(model, state):
"""Does `state` contain all the prerequisite inputs for `model`"""
return all([p in state for p in model.in_ports])
# Substitute
state = {p:p for p in ex_in_ports}
mods = set(models)
i = 0
while mods and i < iter_max:
i += 1
ready_mods = {mod for mod in mods if _have_prereqs(mod, state)}
for mod in ready_mods:
state |= {op:oe.subs(state) for op,oe in mod.out_exprs.items()}
state |= {pi:state[po] for po,pi in connectivity if po in state}
mods -= ready_mods
# Check
if i == iter_max:
ls = connectivity.loops
print("Found loops:",list(ls))
raise NotImplementedError(
f"Reached max iteration limit ({iter_max}) but all models do not "
f"yet have their prerequisite inputs. Remaining models are {mods}")
extra_symbols = {s for oe in state.values()
for s in oe.free_symbols if s in ex_out_ports}
if extra_symbols:
raise AttributeError("Extra symbols found after substitution: {:}. Either "
"relabel as compound input port, or adjust internal connectivity "
"accordingly.".format(extra_symbols))
return SymbolicModel(name=name, ports=ex_ports, out_exprs=state)
except NotImplementedError:
return super().compound(name=name, models=models, connectivity=connectivity)
##############
## UNSORTED ##
##############
from sympy import Matrix, ImmutableMatrix, sqrt, I, exp
class Linear(SymbolicModel):
"""
Linear optical model for classical and quantum optics.
unitary_matrix: square sympy Matrix of dimension n; unitary or lossy unitary.
"""
def define(self, unitary_matrix=None, **kwargs):
super().define(**kwargs)
self.properties.update({"optical", "time-independent"})
self.in_optical_ports = [p for p in self.in_ports if p.kind == port.kind.optical]
self.out_optical_ports = [p for p in self.out_ports if p.kind == port.kind.optical]
if unitary_matrix is None:
unitary_matrix = sympy.eye(len(self.out_optical_ports))
self.U = ImmutableMatrix(unitary_matrix)
if not self.U.is_square:
raise AttributeError("Linear model matrix (unitary_matrix) must be square.")
self.n_ins = self.U.rows
self.n_outs = self.U.rows
if len(self.in_optical_ports) != self.n_ins:
raise AttributeError(f"Number of input optical ports "
f"{len(self.in_optical_ports)} does not match dimension of model matrix "
f"({self.n_ins}) of model {self}:{self.name}. Add ports before adding "
f"model. Input ports were {self.in_ports} ({self.in_optical_ports} "
f"optical), output ports were {self.out_ports} ({self.out_optical_ports} "
f"optical).")
if len(self.out_optical_ports) != self.n_outs:
raise AttributeError("Number of output names {:} does not match dimension of "
"model matrix {:}. Add ports before adding model.".format(
len(self.out_optical_ports), self.n_outs))
# TODO: Should override `out_func` to use matrix multiplication for the optical ports
self.out_exprs = {op:oe for op,oe in
zip(self.out_optical_ports, self.U * Matrix(self.in_optical_ports) ) }
@classmethod
def compound(cls, name, models, connectivity):
try:
if all([isinstance(m,Linear) for m in models]):
if connectivity.has_loops:
raise NotImplementedError("Unable to hybridise models of type '{:}' "
"containing loops".format(cls))
# Put models in causal order
models = connectivity.order_models(models)
# Filter the connectivity to only cover these models
connectivity = connectivity.filtered_by_models(models)
# Get ports from models
ports = [p for m in models for p in m.ports]
# Filter external ports
ex_ports = [p for p in ports if p not in connectivity]
ex_out_ports = [p for p in ex_ports if p.direction == port.direction.out]
ex_in_ports = [p for p in ex_ports if p.direction == port.direction.inp]
# Map modes
# TODO: This routine is very expensive, possible to optimise?
modes = dict()
np = 0
# Pre-populate modes with port order
iops = [p for p in ex_in_ports if p.kind == port.kind.optical]
oops = [p for p in ex_out_ports if p.kind == port.kind.optical]
                assert len(iops) == len(oops), ("Numbers of input and output optical ports do "
                    "not match")
for ip,op in zip(iops, oops):
modes[np] = {ip, op}
np += 1
# Map
for model in models:
for ip,op in zip(model.in_optical_ports, model.out_optical_ports):
matched = False
for mode,mode_ports in modes.items():
if (ip in mode_ports) or any([connectivity.test(xp,mp)
for mp in mode_ports for xp in [ip,op]]):
# If ports connect to ports of any known mode,
# associate them with this mode
mode_ports |= {ip, op}
matched = True
break
if not matched:
# If ports match no known mode, add a new mode,
# and associate these ports with it
modes[np] = {ip,op}
np += 1
# Invert modes[]
mode_of_port = {p:m for m in modes for p in modes[m]}
# Initial matrix
U = sympy.eye(np)
# Accumulate model matrix
for m in models:
# Map old matrix rows to new ones
mode_map = [mode_of_port[p] for p in m.in_optical_ports]
U0m = m.U
n = U0m.rows
Um = Matrix(Matrix.diag(sympy.eye(np), U0m, sympy.eye(np - n)))
# Orient matrix modes to ports
for i,j in enumerate(mode_map):
Um.row_swap(i+np,j)
Um.col_swap(i+np,j)
# Delete temp row/cols
for _ in range(np):
Um.row_del(np)
Um.col_del(np)
# print("Um:")
# sympy.pprint(Um)
# Accumulate
U = U * Um
# print("U:")
# sympy.pprint(U)
return Linear(name=name, ports=ex_ports, unitary_matrix=U)
raise NotImplementedError("Linear unable to compound input models {:}".format(
[m for m in models]))
except NotImplementedError:
return super().compound(name=name, models=models, connectivity=connectivity)
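# Added illustrative sketch: the canonical 2x2 matrix one would pass to `Linear`
# as `unitary_matrix` for a symmetric 50:50 beamsplitter. Port construction is
# omitted because it depends on arch.port internals; only the matrix algebra
# that `Linear` checks (squareness, unitarity) is shown.
def _example_beamsplitter_unitary():
    from sympy import Matrix, I, sqrt, eye
    U = Matrix([[1, I], [I, 1]]) / sqrt(2)  # symmetric 50:50 beamsplitter
    assert U * U.H == eye(2)  # unitary, as `Linear` expects
    return U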
class LinearGroupDelay(Linear):
"""
Linear optical model including lumped group delay.
"""
def define(self, delay=0, **kwargs):
super().define(**kwargs)
self.properties.add("discrete-time")
try:
self.properties.remove("time-independent")
except KeyError:
pass
self.delay = delay
        for p in self.ports:
            p.data['delay'] = self.delay
    @classmethod
    def compound(cls, name, models, connectivity):
        new_mod = Linear.compound(name,
            models=models, connectivity=connectivity)
        # Note: group delays of the constituent models are not yet accumulated here.
        return new_mod
class SourceModel(SymbolicModel):
"""
Model for sources.
"""
def define(self, **kwargs):
super().define(**kwargs)
self.properties.add("source")
self.out_optical_ports = [p for p in self.out_ports if p.kind == port.kind.optical]
| 28.116667 | 102 | 0.680498 |
import abc
import sympy
from ..connectivity import Connectivity
import arch.port as port
import numpy as np
import math
class Model(abc.ABC):
def __init__(self, name, block=None, ports=None, **kwargs):
self.name = name
if block is not None and ports is None:
self.ports = list(block.ports)
elif ports is not None and block is None:
self.ports = list(ports)
elif ports is not None and block is not None:
raise AttributeError("One and only one of either `block` or `ports` "
"may be set.")
else:
self.ports = list()
self.__properties = set()
self.define(**kwargs)
@classmethod
def compound(cls, name, models, connectivity):
print ("Compounding in Model")
return NumericModel.compound(name, models, connectivity)
@property
def lineage(self):
def list_bases(c):
if issubclass(c, Model):
all_bases = [c]
for base in c.__bases__:
all_bases.extend(list_bases(base))
return all_bases
else:
return []
return list_bases(self.__class__)
@property
def properties(self):
return self.__properties
@property
def in_ports(self):
return [p for p in self.ports if p.direction == port.direction.inp]
@property
def out_ports(self):
return [p for p in self.ports if p.direction == port.direction.out]
@abc.abstractmethod
def define(self, **kwargs):
pass
@property
def port_names(self):
return {str(e) for e in self.ports}
@property
def default_input_state(self):
return {p:p.default for p in self.in_ports}
def __repr__(self):
return "<"+self.__class__.__module__+"."+self.__class__.__name__+" '"+self.name+"'>"
class NumericModel(Model):
def define(self, out_func=lambda x:x, **kwargs):
self.properties.add("numeric")
out = out_func(self.default_input_state).keys()
described_out_ports = set(out_func(self.default_input_state).keys())
if not set(self.out_ports).issubset(described_out_ports):
print(self.out_ports)
print(described_out_ports)
raise AttributeError("Model output ports do not match ports"
" described by out_func. "
"Ports missing from `out_func` are {:}. ".format(
[p for p in self.out_ports if p not in described_out_ports]) )
self.out_func = out_func
@classmethod
def compound(cls, name, models=[], connectivity=Connectivity()):
print("Compounding in NumericalModel")
connectivity = connectivity.filtered_by_models(models)
ports = [p for m in models for p in m.ports]
ex_ports = [p for p in ports if p not in connectivity]
ex_out_ports = [p for p in ex_ports if p.direction == port.direction.out]
def _have_prereqs(model, state):
return all([p in state for p in model.in_ports])
def out_func(state):
mods = set(models)
loops = connectivity.loops
state = {e:e.default for l in loops for e in l if isinstance(e, port.var)} | state
while mods:
ready_mods = {mod for mod in mods if _have_prereqs(mod, state)}
for mod in ready_mods:
state |= mod.out_func(state)
state |= {pi:state[po] for po,pi in connectivity if po in state}
mods -= ready_mods
return state
return NumericModel(name=name, ports=ex_ports, out_func=out_func)
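# Added illustrative sketch: the compound out_func above is a fixed-point sweep
# over the model graph. The same pattern with plain dicts and callables (no
# arch objects, and an acyclic toy graph so the sweep terminates) looks like:
def _example_fixed_point_sweep():
    # Two toy "models": each consumes named inputs and produces named outputs.
    models = [
        {"ins": ["a"], "f": lambda s: {"x": s["a"] + 1}},
        {"ins": ["x"], "f": lambda s: {"y": 2 * s["x"]}},
    ]
    state = {"a": 1}
    pending = list(models)
    while pending:
        ready = [m for m in pending if all(k in state for k in m["ins"])]
        for m in ready:
            state |= m["f"](state)  # same dict-merge update as out_func above
        pending = [m for m in pending if m not in ready]
    return state  # {'a': 1, 'x': 2, 'y': 4}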
class SymbolicModel(Model):
def define(self, out_exprs=None, **kwargs):
self.properties.add("symbolic")
if out_exprs is not None:
self.out_exprs = out_exprs
@property
def out_exprs(self):
return self.__out_exprs
@out_exprs.setter
def out_exprs(self, new_out_exprs):
self.__out_exprs = new_out_exprs
try:
self._out_func_lambda = sympy.lambdify(self.in_ports,
[self.out_exprs[p] for p in self.out_ports])
except KeyError as e:
raise KeyError(f"Output port '{e}' not described by `out_exprs` {self.out_exprs}")
def out_func(self, in_state):
in_state_vec = [in_state[p] for p in self.in_ports]
out_state_vec = self._out_func_lambda(*in_state_vec)
out_state_dict = {self.out_ports[i]:out_state_vec[i] for i in range(len(out_state_vec))}
return out_state_dict
@classmethod
def compound(cls, name, models=[], connectivity=Connectivity(), iter_max=10):
try:
connectivity = connectivity.filtered_by_models(models)
ports = [p for m in models for p in m.ports]
ex_ports = [p for p in ports if p not in connectivity]
ex_out_ports = [p for p in ex_ports if p.direction == port.direction.out]
ex_in_ports = [p for p in ex_ports if p.direction == port.direction.inp]
def _have_prereqs(model, state):
return all([p in state for p in model.in_ports])
state = {p:p for p in ex_in_ports}
mods = set(models)
i = 0
while mods and i < iter_max:
i += 1
ready_mods = {mod for mod in mods if _have_prereqs(mod, state)}
for mod in ready_mods:
state |= {op:oe.subs(state) for op,oe in mod.out_exprs.items()}
state |= {pi:state[po] for po,pi in connectivity if po in state}
mods -= ready_mods
if i == iter_max:
ls = connectivity.loops
print("Found loops:",list(ls))
raise NotImplementedError(
f"Reached max iteration limit ({iter_max}) but all models do not "
f"yet have their prerequisite inputs. Remaining models are {mods}")
extra_symbols = {s for oe in state.values()
for s in oe.free_symbols if s in ex_out_ports}
if extra_symbols:
raise AttributeError("Extra symbols found after substitution: {:}. Either "
"relabel as compound input port, or adjust internal connectivity "
"accordingly.".format(extra_symbols))
return SymbolicModel(name=name, ports=ex_ports, out_exprs=state)
except NotImplementedError:
return super().compound(name=name, models=models, connectivity=connectivity)
import sympy
from sympy import Matrix, sqrt, I, exp
import arch.port as port
from sympy import ImmutableMatrix, Matrix
class Linear(SymbolicModel):
def define(self, unitary_matrix=None, **kwargs):
super().define(**kwargs)
self.properties.update({"optical", "time-independent"})
self.in_optical_ports = [p for p in self.in_ports if p.kind == port.kind.optical]
self.out_optical_ports = [p for p in self.out_ports if p.kind == port.kind.optical]
if unitary_matrix is None:
unitary_matrix = sympy.eye(len(self.out_optical_ports))
self.U = ImmutableMatrix(unitary_matrix)
if not self.U.is_square:
raise AttributeError("Linear model matrix (unitary_matrix) must be square.")
self.n_ins = self.U.rows
self.n_outs = self.U.rows
if len(self.in_optical_ports) != self.n_ins:
raise AttributeError(f"Number of input optical ports "
f"{len(self.in_optical_ports)} does not match dimension of model matrix "
f"({self.n_ins}) of model {self}:{self.name}. Add ports before adding "
f"model. Input ports were {self.in_ports} ({self.in_optical_ports} "
f"optical), output ports were {self.out_ports} ({self.out_optical_ports} "
f"optical).")
if len(self.out_optical_ports) != self.n_outs:
raise AttributeError("Number of output names {:} does not match dimension of "
"model matrix {:}. Add ports before adding model.".format(
len(self.out_optical_ports), self.n_outs))
self.out_exprs = {op:oe for op,oe in
zip(self.out_optical_ports, self.U * Matrix(self.in_optical_ports) ) }
@classmethod
def compound(cls, name, models, connectivity):
try:
if all([isinstance(m,Linear) for m in models]):
if connectivity.has_loops:
raise NotImplementedError("Unable to hybridise models of type '{:}' "
"containing loops".format(cls))
models = connectivity.order_models(models)
connectivity = connectivity.filtered_by_models(models)
ports = [p for m in models for p in m.ports]
ex_ports = [p for p in ports if p not in connectivity]
ex_out_ports = [p for p in ex_ports if p.direction == port.direction.out]
ex_in_ports = [p for p in ex_ports if p.direction == port.direction.inp]
modes = dict()
np = 0
iops = [p for p in ex_in_ports if p.kind == port.kind.optical]
oops = [p for p in ex_out_ports if p.kind == port.kind.optical]
                assert len(iops) == len(oops), ("Numbers of input and output optical ports do "
                    "not match")
for ip,op in zip(iops, oops):
modes[np] = {ip, op}
np += 1
for model in models:
for ip,op in zip(model.in_optical_ports, model.out_optical_ports):
matched = False
for mode,mode_ports in modes.items():
if (ip in mode_ports) or any([connectivity.test(xp,mp)
for mp in mode_ports for xp in [ip,op]]):
mode_ports |= {ip, op}
matched = True
break
if not matched:
modes[np] = {ip,op}
np += 1
mode_of_port = {p:m for m in modes for p in modes[m]}
U = sympy.eye(np)
for m in models:
mode_map = [mode_of_port[p] for p in m.in_optical_ports]
U0m = m.U
n = U0m.rows
Um = Matrix(Matrix.diag(sympy.eye(np), U0m, sympy.eye(np - n)))
for i,j in enumerate(mode_map):
Um.row_swap(i+np,j)
Um.col_swap(i+np,j)
for _ in range(np):
Um.row_del(np)
Um.col_del(np)
U = U * Um
return Linear(name=name, ports=ex_ports, unitary_matrix=U)
raise NotImplementedError("Linear unable to compound input models {:}".format(
[m for m in models]))
except NotImplementedError:
return super().compound(name=name, models=models, connectivity=connectivity)
class LinearGroupDelay(Linear):
def define(self, delay=0, **kwargs):
super().define(**kwargs)
self.properties.add("discrete-time")
try:
self.properties.remove("time-independent")
except KeyError:
pass
self.delay = delay
        for p in self.ports:
            p.data['delay'] = self.delay
@classmethod
def compound(cls, name, models, connectivity):
        new_mod = Linear.compound(name,
            models=models, connectivity=connectivity)
        return new_mod
class SourceModel(SymbolicModel):
def define(self, **kwargs):
super().define(**kwargs)
self.properties.add("source")
self.out_optical_ports = [p for p in self.out_ports if p.kind == port.kind.optical]
| true | true |
1c4a46e3681387e261c445f0fe0ee20614d7a18e | 26,847 | py | Python | src/virtual-wan/azext_vwan/vendored_sdks/v2021_03_01/v2021_03_01/aio/operations/_nat_gateways_operations.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/virtual-wan/azext_vwan/vendored_sdks/v2021_03_01/v2021_03_01/aio/operations/_nat_gateways_operations.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/virtual-wan/azext_vwan/vendored_sdks/v2021_03_01/v2021_03_01/aio/operations/_nat_gateways_operations.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NatGatewaysOperations:
"""NatGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
nat_gateway_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
nat_gateway_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified nat gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
nat_gateway_name=nat_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def get(
self,
resource_group_name: str,
nat_gateway_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NatGateway":
"""Gets the specified nat gateway in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NatGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_03_01.models.NatGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.NatGateway",
**kwargs: Any
) -> Optional["_models.NatGateway"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.NatGateway"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NatGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NatGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.NatGateway",
**kwargs: Any
) -> AsyncLROPoller["_models.NatGateway"]:
"""Creates or updates a nat gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param parameters: Parameters supplied to the create or update nat gateway operation.
:type parameters: ~azure.mgmt.network.v2021_03_01.models.NatGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NatGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_03_01.models.NatGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
nat_gateway_name=nat_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.NatGateway":
"""Updates nat gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param parameters: Parameters supplied to update nat gateway tags.
:type parameters: ~azure.mgmt.network.v2021_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NatGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_03_01.models.NatGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.NatGatewayListResult"]:
"""Gets all the Nat Gateways in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NatGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_03_01.models.NatGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NatGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/natGateways'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NatGatewayListResult"]:
"""Gets all nat gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NatGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_03_01.models.NatGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NatGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways'} # type: ignore
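# Hedged usage sketch (added for illustration; not part of the generated file).
# It shows how these operations are typically reached through the public
# management client; the subscription, group and gateway names are placeholders.
async def _example_nat_gateway_delete():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            poller = await client.nat_gateways.begin_delete("<resource-group>", "<nat-gateway-name>")
            await poller.result()  # wait for the long-running delete to finish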
| 49.080439 | 191 | 0.665139 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NatGatewaysOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
nat_gateway_name: str,
**kwargs: Any
) -> None:
        cls = kwargs.pop('cls', None)
        error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
        url = self._delete_initial.metadata['url']
        path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'}
async def begin_delete(
self,
resource_group_name: str,
nat_gateway_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
nat_gateway_name=nat_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'}
async def get(
self,
resource_group_name: str,
nat_gateway_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NatGateway":
        cls = kwargs.pop('cls', None)
        error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'}
async def _create_or_update_initial(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.NatGateway",
**kwargs: Any
) -> Optional["_models.NatGateway"]:
        cls = kwargs.pop('cls', None)
        error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
        url = self._create_or_update_initial.metadata['url']
        path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'NatGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NatGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'}
async def begin_create_or_update(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.NatGateway",
**kwargs: Any
) -> AsyncLROPoller["_models.NatGateway"]:
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
nat_gateway_name=nat_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'}
async def update_tags(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.NatGateway":
        cls = kwargs.pop('cls', None)
        error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
        url = self.update_tags.metadata['url']
        path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'}
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.NatGatewayListResult"]:
        cls = kwargs.pop('cls', None)
        error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
                url = self.list_all.metadata['url']
                path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NatGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/natGateways'}
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NatGatewayListResult"]:
        cls = kwargs.pop('cls', None)
        error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
                url = self.list.metadata['url']
                path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NatGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways'} | true | true |
1c4a47a2ef218891b453fdd516b5d165a1dedf97 | 6,511 | py | Python | FACTScontrol.py | amahoro12/anne | 9b68c71c491bde4f57c2cbbf78a377239a9026d8 | [
"MIT"
] | null | null | null | FACTScontrol.py | amahoro12/anne | 9b68c71c491bde4f57c2cbbf78a377239a9026d8 | [
"MIT"
] | null | null | null | FACTScontrol.py | amahoro12/anne | 9b68c71c491bde4f57c2cbbf78a377239a9026d8 | [
"MIT"
] | null | null | null |
import pandapower.control as ct
## Creates custom PI-controllers for the shunt and series FACTS devices used in the test network.
# SHUNT CONTROLLER
class ShuntFACTS(ct.basic_controller.Controller):
def __init__(self, net, busVoltageInd, convLim, shuntIndex=0, q_mvar_rating=50, max_iter=30, in_service=True,
recycle=False, order=0, level=0, **kwargs):
# construct through superclass
super().__init__(net, in_service=in_service, recycle=recycle, order=order, level=level,
initial_powerflow=True, **kwargs)
# Initialise class variables
self.shuntIndex = shuntIndex
self.busVoltageInd = busVoltageInd
self.ref = 1.0 # reference value to reach
self.convLim = convLim # limit threshold for convergence
self.meas = self.net.res_bus.vm_pu[busVoltageInd]
self.applied = False
self.q_mvar_max = q_mvar_rating
self.q_mvar_min = -q_mvar_rating
self.iter_counter = 0 # count number of iterations
        self.maxed_counter = 0  # counts iterations with the output maxed out (can't converge to v_ref)
        self.max_iter = max_iter  # maximum number of iterations
self.v_delta = 0
self.v_delta_accum = 0
    # Return True once the controller has converged to the reference value
def is_converged(self):
self.meas = self.net.res_bus.vm_pu[self.busVoltageInd]
        # Converged if within tolerance, if the output stayed maxed out for four iterations, or if the iteration cap is reached
if abs(self.meas - self.ref) < self.convLim or self.maxed_counter >= 4 or self.iter_counter == self.max_iter:
self.applied = True
return self.applied
# In case the controller is not yet converged, the control step is executed.
def control_step(self):
# Measurement
self.meas = self.net.res_bus.vm_pu[self.busVoltageInd]
self.v_delta = self.meas - self.ref
# Control Coefficients
K_p = 10 # Factor 10 is to cap at rating when v_delta is +/- 0.1 pu.
K_i = 15
# PI-control equation
self.net.shunt.q_mvar[self.shuntIndex] = K_p * self.q_mvar_max * (
self.v_delta) + K_i * self.q_mvar_max * self.v_delta_accum
        # Make sure the output doesn't exceed the rating
if self.net.shunt.q_mvar[self.shuntIndex] + 0.00001 >= self.q_mvar_max:
self.net.shunt.q_mvar[self.shuntIndex] = self.q_mvar_max
self.maxed_counter += 1
elif self.net.shunt.q_mvar[self.shuntIndex] - 0.00001 <= self.q_mvar_min:
self.net.shunt.q_mvar[self.shuntIndex] = self.q_mvar_min
self.maxed_counter += 1
        # Update for a possible next iteration of control
self.v_delta_accum += self.v_delta
self.iter_counter += 1
    # Finalize: reset the controller state so the controller can be reused in the next control run
def finalize_control(self):
self.applied = False
self.v_delta_accum = 0
self.iter_counter = 0 # count number of iterations
        self.maxed_counter = 0  # counts iterations with the output maxed out (can't converge to v_ref)
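# Hedged usage sketch (added for illustration): wiring the shunt controller into
# a minimal pandapower network. Bus data, line std_type and ratings below are
# illustrative assumptions, not values from the actual test network.
def _example_shunt_facts():
    import pandapower as pp
    net = pp.create_empty_network()
    b0 = pp.create_bus(net, vn_kv=110.)
    b1 = pp.create_bus(net, vn_kv=110.)
    pp.create_ext_grid(net, bus=b0)
    pp.create_line(net, from_bus=b0, to_bus=b1, length_km=10.,
                   std_type="149-AL1/24-ST1A 110.0")
    pp.create_load(net, bus=b1, p_mw=60., q_mvar=20.)
    pp.create_shunt(net, bus=b1, q_mvar=0.)  # device the controller steers (index 0)
    # Constructing the controller registers it with the network.
    ShuntFACTS(net, busVoltageInd=b1, convLim=1e-3, shuntIndex=0, q_mvar_rating=50)
    pp.runpp(net, run_control=True)  # power flow with the control loop enabled
    return net.res_bus.vm_pu[b1]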
# Series CONTROLLER
class SeriesFACTS(ct.basic_controller.Controller):
def __init__(self, net, lineLPInd, convLim, x_line_pu, max_iter=30, switchInd=1, serIndex=0, x_comp_rating=0.4,
in_service=True, recycle=False, order=0, level=0, **kwargs):
# construct through superclass
super().__init__(net, in_service=in_service, recycle=recycle, order=order, level=level,
initial_powerflow=True, **kwargs)
# Initialise class variables
self.switchInd = switchInd
self.x_line_pu = x_line_pu
self.serIndex = serIndex
self.lineLPInd = lineLPInd
self.ref = 50 # reference value to reach
self.convLim = convLim # limit threshold for convergence
self.meas = 0
self.applied = False
self.x_comp_max = x_comp_rating
self.x_comp_min = -x_comp_rating
self.iter_counter = 0 # count number of iterations
        self.maxed_counter = 0  # counts iterations with the output maxed out (can't converge to the loading reference)
self.max_iter = max_iter
self.lp_delta = 0
self.lp_delta_accum = 0
    # Return True once the controller has converged to the reference value
def is_converged(self):
self.meas = self.net.res_line.loading_percent[self.lineLPInd]
        # Converged if within tolerance, if the output stayed maxed out for four iterations, or if the iteration cap is reached
if abs(self.meas - self.ref)/100 < self.convLim or self.maxed_counter >= 4 or self.iter_counter == self.max_iter:
self.applied = True
return self.applied
# In case the controller is not yet converged, the control step is executed.
def control_step(self):
        # Enable the series device by opening its bypass switch
self.net.switch.closed[self.switchInd] = False
# Measurement
self.meas = self.net.res_line.loading_percent[self.lineLPInd]
self.lp_delta = (self.meas - self.ref) / 100 # div by 100 to get value between 0-1
# Control Coefficients
K_p = 20
K_i = 15
# PI-control equation
op = self.x_line_pu * (K_p * self.x_comp_max * (self.lp_delta) + K_i * self.x_comp_max * self.lp_delta_accum)
        # Clamp the output so it does not exceed the device rating
if op + 0.00001 >= self.x_line_pu * self.x_comp_max:
op = self.x_line_pu * self.x_comp_max
self.maxed_counter += 1
elif op - 0.00001 <= self.x_line_pu * self.x_comp_min:
op = self.x_line_pu * self.x_comp_min
self.maxed_counter += 1
        # Bypass the series device when the commanded impedance is close to zero
if abs(op) < 0.0001: # Helping with convergence
self.net.switch.closed[self.switchInd] = True # ACTUAL network
else:
# Set output of device if not bypassed
self.net.impedance.loc[self.serIndex, ['xft_pu', 'xtf_pu']] = op
        # Update accumulator and counter for a possible next control iteration
self.lp_delta_accum += self.lp_delta
self.iter_counter += 1
    # Finalize: reset controller state so the instance can be reused by the main model
def finalize_control(self):
self.applied = False
self.lp_delta_accum = 0
        self.iter_counter = 0  # reset iteration counter
        self.maxed_counter = 0  # reset saturation counter
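# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): the two-bus network, line type, load and
# ratings below are assumptions, not part of the original model.  Note that
# recent pandapower releases pass `net` into the controller hooks
# (is_converged(self, net) etc.), so the signatures above may need adapting
# to the installed version.
if __name__ == "__main__":
    import pandapower as pp
    from pandapower.control import run_control

    net = pp.create_empty_network()
    b0 = pp.create_bus(net, vn_kv=110.0)
    b1 = pp.create_bus(net, vn_kv=110.0)
    pp.create_ext_grid(net, b0)
    pp.create_line(net, b0, b1, length_km=40.0,
                   std_type="149-AL1/24-ST1A 110.0")
    pp.create_load(net, b1, p_mw=40.0, q_mvar=20.0)
    shunt_idx = pp.create_shunt(net, b1, q_mvar=0.0)  # controllable shunt (SVC-like)

    pp.runpp(net)  # initial power flow: ShuntFACTS.__init__ reads net.res_bus
    ShuntFACTS(net, busVoltageInd=b1, convLim=1e-3,
               shuntIndex=shunt_idx, q_mvar_rating=50)  # registers itself in net
    run_control(net)  # iterate until vm_pu(b1) ~ 1.0 or the counters give up
    print(net.res_bus.vm_pu[b1], net.shunt.q_mvar[shunt_idx])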
1c4a482bb9e01f84eb19da55a5549ca75bbd457d | 128,109 | py | Python | tests/arm_tests.py | SantiagoRomani/gdb_arm | 9e3c2eec2c41337b2a88222a87ad0b2f418111c7 | [
"MIT"
] | 2 | 2021-05-20T09:30:31.000Z | 2022-02-23T03:34:51.000Z | tests/arm_tests.py | SantiagoRomani/gdb_arm | 9e3c2eec2c41337b2a88222a87ad0b2f418111c7 | [
"MIT"
] | null | null | null | tests/arm_tests.py | SantiagoRomani/gdb_arm | 9e3c2eec2c41337b2a88222a87ad0b2f418111c7 | [
"MIT"
] | null | null | null | """ Groups of tests for gdb_arm """
from num_analyzer import NumberAnalyzer
from string_analyzer import CharAnalyzer
from string_analyzer import StringAnalyzer
from data_analyzer import DataAnalyzer
from adr_analyzer import AddressAnalyzer
from reg_analyzer import RegisterAnalyzer
from reg_analyzer import RegisterBitsAnalyzer
from reg_analyzer import RegisterListAnalyzer
from imm_analyzer import ImmediateOpAnalyzer
from imm_analyzer import ImmediateRSAnalyzer
from op2_analyzer import Op2Analyzer
from opdat_analyzer import OpdatAnalyzer
from instdat_analyzer import InstdatAnalyzer
from instmul_analyzer import InstmulAnalyzer
from instjmp_analyzer import InstjmpAnalyzer
from opldst_analyzer import Opldst2Analyzer
from opldst_analyzer import Opldst3Analyzer
from instmem_analyzer import InstmemAnalyzer
from instmsc_analyzer import InstmscAnalyzer
from arm_analyzer import ArmAnalyzer
number_analyzer = NumberAnalyzer()
char_analyzer = CharAnalyzer()
string_analyzer = StringAnalyzer()
data_analyzer = DataAnalyzer()
address_analyzer = AddressAnalyzer()
register_analyzer = RegisterAnalyzer()
regbit_analyzer = RegisterBitsAnalyzer()
reglst_analyzer = RegisterListAnalyzer()
immediate_op_analyzer = ImmediateOpAnalyzer()
immediate_sr_analyzer = ImmediateRSAnalyzer()
op2_analyzer = Op2Analyzer()
opdat_analyzer = OpdatAnalyzer()
instdat_analyzer = InstdatAnalyzer()
instmul_analyzer = InstmulAnalyzer()
instjmp_analyzer = InstjmpAnalyzer()
opldst2_analyzer = Opldst2Analyzer()
opldst3_analyzer = Opldst3Analyzer()
instmem_analyzer = InstmemAnalyzer()
instmsc_analyzer = InstmscAnalyzer()
arm_analyzer = ArmAnalyzer()
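# Each tuple below follows the pattern (input text, expected parsed values,
# expected status): a status around 1000 means success, negative values are
# error codes, and the comments trace the transitions of each analyzer's
# state machine.  A driver over these tables could look like this sketch.
# NOTE: the analyze(text) call and its (values, status) return shape are
# hypothetical names for illustration only; the analyzers' real API may differ.
def run_table(analyzer, table):
    failures = 0
    for text, expected_values, expected_status in table:
        values, status = analyzer.analyze(text)  # hypothetical API
        if values != expected_values or status != expected_status:
            failures += 1
            print('FAIL %r -> %r, %d (expected %r, %d)'
                  % (text, values, status, expected_values, expected_status))
    return failures
# e.g.: run_table(number_analyzer, hex_test + dec_test + oct_test + bin_test)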
hex_test = [('', [], -1001), # T10.0.0 error: empty input
(' ', [], -1001), # T10.0.1 > T10.0.0 error: white spaces
('0x', [], -1005), # T10.0.3 > T10.2.0 error: leading '0x', missing hex digits
(' 0x', [], -1005), # T10.0.1 > T10.0.3 > T10.2.0 / idem with leading white space
('0x1', [1], 1000), # T10.0.3 > T10.2.1 hex number: single digit
(' 0x1', [1], 1000), # T10.0.1 > T10.0.3 > T10.2.1 / idem with white leading space
(' 0xA', [10], 1000), # T10.0.1 > T10.0.3 > T10.2.1 / idem with a letter digit
('0x01', [1], 1000), # T10.0.3 > T10.2.1 / with leading zeros
(' 0x001', [1], 1000), # T10.0.1 > T10.0.3 > T10.2.1 / idem with leading spaces
('0x10', [16], 1000), # T10.0.3 > T10.2.1 / two digits
('0x2864', [10340], 1000), # T10.0.3 > T10.2.1 / four digits
('0xF3AE', [62382], 1000), # T10.0.3 > T10.2.1 / four digits, with hex letters
('0xb14a', [45386], 1000), # T10.0.3 > T10.2.1 / (lower case hex letters)
('0xb14A', [45386], 1000), # T10.0.3 > T10.2.1 / (mixed lower / upper case)
('0xR124', [], -1005), # T10.0.3 > T10.2.2 error: illegal digits (first one)
('0x51V4', [], -1005), # T10.0.3 > T10.2.2 / (third one)
('0x514W', [], -1005), # T10.0.3 > T10.2.2 / (last one)
('0x10002EF0', [268447472], 1000), # T10.0.3 > T10.2.1 big hex number: eight digits
('0x10002EF00', [], -1006) # T10.0.3 > T10.2.1+override too long number: nine digits (>=2^32)
]
dec_test = [('0', [0], 1000), # T10.0.4 > T10.3.0 dec/oct number: the zero
(' 0', [0], 1000), # T10.0.1 > T10.0.4 > T10.3.0 / idem with leading space
('1', [1], 1000), # T10.0.7 > T10.5.1 dec number: single digit
(' 1', [1], 1000), # T10.0.1 > T10.0.7 > T10.5.1 / idem with white space
('-1', [-1], 1000), # T10.0.5 > T10.4.1 / negative number
(' -1', [-1], 1000), # T10.0.1 > T10.0.5 > T10.4.1 / negative num. with leading spaces
('10', [10], 1000), # T10.0.7 > T10.5.1 / two digits
('2864', [2864], 1000), # T10.0.7 > T10.5.1 / four digits
('-2864', [-2864], 1000), # T10.0.5 > T10.4.1 / four digits negative number
('+2864', [2864], 1000), # T10.0.6 > T10.6.1 / four digits positive number
('r12', [], -1001), # T10.0.8 error: illegal digits (first one)
('5V6', [], -1004), # T10.0.6 > T10.5.2 / (second one)
('514W', [], -1004), # T10.0.6 > T10.5.2 / (last one)
            ('-', [], -1004),           # T10.0.5 > T10.4.0 / no digit after '-'
            ('+', [], -1004),           # T10.0.6 > T10.6.0 / no digit after '+'
            ('-r12', [], -1004),        # T10.0.5 > T10.4.2 / illegal first digit after '-'
            ('+r12', [], -1004),        # T10.0.6 > T10.6.2 / illegal first digit after '+'
('-5V6', [], -1004), # T10.0.5 > T10.4.2 / illegal middle digit after '-'
('4684474720', [], -1006), # T10.0.6 > T10.5.1+override long dec number (>=2^32)
('-2147483649', [], -1006) # T10.0.5 > T10.4.1+override long neg. dec number (<-2^31)
]
oct_test = [('000', [0], 1000), # T10.0.4 > T10.3.1 oct number: zeroes
(' 00', [0], 1000), # T10.0.1 > T10.0.4 > T10.3.1 / idem with leading space
('01', [1], 1000), # T10.0.4 > T10.3.1 oct number: single digit
(' 01', [1], 1000), # T10.0.1 > T10.0.4 > T10.3.1 / idem with white space
('001', [1], 1000), # T10.0.4 > T10.3.1 / several zeros before digit
('010', [8], 1000), # T10.0.4 > T10.3.1 oct number: two digits
('02764', [1524], 1000), # T10.0.4 > T10.3.1 / four digits
('02864', [], -1003), # T10.0.4 > T10.3.2 error: malformed octal number
('0r12', [], -1003), # T10.0.4 > T10.3.2 error: illegal digits (first one after first 0)
('05V6', [], -1003), # T10.0.4 > T10.3.2 / (second one)
('0514W', [], -1003), # T10.0.4 > T10.3.2 / (last one)
('00r12', [], -1003), # T10.0.4 > T10.3.2 / illegal first digit after several 0s
('063710000000', [], -1006) # T10.0.4 > T10.3.1+override long oct number (>=2^32)
]
bin_test = [('0b', [], -1002), # T10.0.2 > T10.1.0 error: leading '0b', missing bin digits
(' 0b', [], -1002), # T10.0.1 > T10.0.2 > T10.1.0 / idem with leading white space
('0b1', [1], 1000), # T10.0.2 > T10.1.1 bin number: single bit
(' 0b1', [1], 1000), # T10.0.1 > T10.0.2 > T10.1.1 / idem with white space
(' 0b0', [0], 1000), # T10.0.1 > T10.0.2 > T10.1.1 / idem white space & zero bit
('0b01', [1], 1000), # T10.0.2 > T10.1.1 / leading zero
(' 0b001', [1], 1000), # T10.0.1 > T10.0.2 > T10.1.1 / leading spaces & leading zeros
('0b10', [2], 1000), # T10.0.2 > T10.1.1 two bits
('0b0110', [6], 1000), # T10.0.2 > T10.1.1 four bits
('0bR101', [], -1002), # T10.0.2 > T10.1.2 error: illegal bits (first one)
('0b01V4', [], -1002), # T10.0.2 > T10.1.2 / (third one)
('0b110W', [], -1002), # T10.0.2 > T10.1.2 / (last one)
('0b0140', [], -1002), # T10.0.2 > T10.1.2 / (non-binary digit)
('0b10000000000000001000000000000000', [2147516416], 1000), # T10.0.2 > T10.1.1 32 bits
('0b100000000000000010000000000000001', [], -1006) # T10.0.2 > T10.1.1+override 33 bits
]
chr_test = [('', [], -1101), # T11.0.0 error: no single quote
("'", [], -1101), # T11.0.2 > T11.1.0 error: open single quote, missing char
(' n\'', [], -1101), # T11.0.1 > T11.0.3 error: missing quote before characters
("''", [], -1102), # T11.0.2 > T11.1.1 error: empty single quotes
("' ", [32], -1104), # T11.0.2 > T11.1.2 > T11.2.0 error: unclosed single quoted char
("' 0", [32], -1105), # T11.0.2 > T11.1.2 > T11.2.2 error: more than one character
("' '", [32], 1000), # T11.0.2 > T11.1.2 > T11.2.1 successful single char capture
(" ' '", [32], 1000), # T11.0.1 > T11.0.2 > T11.1.2 > T11.2.1 / idem with leading space
('" "', [], -1101), # T11.0.3 error: missing single quote
('\'\"\'', [34], 1000), # T11.0.2 > T11.1.2 > T11.2.1 capture double quote as single char
('\'\n\'', [], -1103) # T11.0.2 > T11.1.3 illegal character in single quotes
]
str_test = [('', [], -1201), # T12.0.0 error: no double quote
("'", [], -1201), # T12.0.3 error: unexpected single quote
('"', [], -1201), # T12.0.2 > T12.1.0 error: open double quote, missing string
(' n\"', [], -1201), # T12.0.1 > T12.0.3 error: missing quote before characters
('""', [], -1202), # T12.0.2 > T12.1.1 error: empty double quotes
('" ', [32], -1204), # T12.0.2 > T12.1.2 > T12.2.0 error: unclosed double quotes
('" 0', [32, 48], -1204), # T12.0.2 > T12.1.2 > T12.2.2 > T12.2.0 / idem with two chars
('" "', [32], 1000), # T12.0.2 > T12.1.2 > T12.2.1 successful single-char string
(' " "', [32], 1000), # T12.0.1 > T12.0.2 > T12.1.2 > T12.2.1 / idem with leading space
('"0123456789"', [48, 49, 50, 51, 52, 53, 54, 55, 56, 57], 1000), # T12.0.2 > T12.1.2 > T12.2.2 > T12.2.1
('"abcdefghijklmnopqrstuvwxyz"', [97, 98, 99, 100, 101, 102, 103, # alphabetic digits
104, 105, 106, 107, 108, 109, 110, 111, 112,
113, 114, 115, 116, 117, 118, 119, 120, 121,
122], 1000), # lower case letters
('"ABCDEFGHIJKLMNOPQRSTUVWXYZ"', [65, 66, 67, 68, 69, 70, 71, 72,
73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
84, 85, 86, 87, 88, 89, 90], 1000), # upper case letters
('"!#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~"', [33, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 58, 59, 60,
61, 62, 63, 64, 91, 92, 93, 94, 95, 96, 123,
124, 125, 126], 1000), # punctuation letters
('\"\'\"', [39], 1000), # T12.0.2 > T12.1.2 > T12.2.1 capture single quote as a string
('\"\n\"', [], -1203), # T12.0.2 > T12.1.3 illegal character after double quote
('\" \n\"', [32], -1203) # T12.0.2 > T12.1.2 > T12.2.2 > T12.2.3 idem after a valid char
]
dat_test = [('', [], -2101), # T21.0.0 error: missing data directive
(' ', [], -2101), # T21.0.1 > T21.0.0 idem with leading space
('.', [], -2101), # T21.0.2 > T21.1.0 error: missing directive after '.'
('f', [], -2101), # T21.0.3 error: missing '.'
('.f', [], -2104), # T21.0.2 > T21.1.6 error: unknown data directive
('.byte', [], -2102), # T21.0.2 > T21.1.1a error: missing data values
('.byte ', [1], -2102), # T21.0.2 > T21.1.1b > T21.2.0 error: missing data values
('.byte2', [], -2103), # T21.0.2 > T21.1.1c error: missing space after directive
('.byte 2', [1, 2], 1000), # T21.0.2 > T21.1.1b > T21.2.1a success: get one byte
('.byte 20', [1, 20], 1000), # T21.0.2 > T21.1.1b > T21.2.1a idem with two digits
('.byte -20', [1, 236], 1000), # T21.0.2 > T21.1.1b > T21.2.1a idem with negative number
('.byte 2000', [1], -2107), # T21.0.2 > T21.1.1b > T21.2.1a + override data >= 2**8
('.byte -200', [1], -2107), # T21.0.2 > T21.1.1b > T21.2.1a + override data < -2**7
('.byte 45r', [1], -1004), # T21.0.2 > T21.1.1b > T21.2.1a + override unexpected decimal digit
('.byte 45,', [1, 45], -2102), # T21.0.2 > T21.1.1b > T21.2.1b > T21.2.0 error: missing data
            ('.byte 45, ', [1, 45], -2106),             # T21.0.2 > T21.1.1b > T21.2.1b > T21.2.3 unrecognizable info
('.byte 200, 0xF4', [1, 200, 244], 1000), # T21.0.2 > T21.1.1b > T21.2.1b > T21.2.1a get two bytes
('.byte \'2\'', [1, 50], 1000), # T21.0.2 > T21.1.1b > T21.2.2a success: get one char
('.byte \'2\', \'F\'', [1, 50, 70], 1000), # T21.0.2 > T21.1.1b > T21.2.2b > T21.2.2a get two chars
('.byte \'2\', 0123', [1, 50, 83], 1000), # T21.0.2 > T21.1.1b > T21.2.2b > T21.2.1a one char + one num.
('.byte \'2\' , 0123', [1, 50, 83], 1000), # T21.0.2 > T21.1.1b > T21.2.2b > T21.2.1a with extra space
('.byte \'2\', 0123 ', [1, 50, 83], 1000), # T21.0.2 > T21.1.1b > T21.2.2b > T21.2.1a with trailing space
('.byte 0b110, \'e\'', [1, 6, 101], 1000), # T21.0.2 > T21.1.1b > T21.2.1b > T21.2.2a one num. + one char
('.byte 0b110 , \'e\'', [1, 6, 101], 1000), # T21.0.2 > T21.1.1b > T21.2.1b > T21.2.2a with extra space
('.byte 0b110, \'e\' ', [1, 6, 101], 1000),
# T21.0.2 > T21.1.1b > T21.2.1b > T21.2.2a with trailing space
('.byte \'e\' c', [1], -2105), # T21.0.2 > T21.1.1b > T21.2.1b > T21.2.1c wrong delimiter
            ('.byte \'e\', c', [1, 101], -2106),        # T21.0.2 > T21.1.1b > T21.2.1b > T21.2.3 unrecognizable info
            ('.byte c', [1], -2106),                    # T21.0.2 > T21.1.1b > T21.2.3 unrecognizable info
('.hword', [], -2102), # T21.0.2 > T21.1.2a error: missing data values
('.hword ', [2], -2102), # T21.0.2 > T21.1.2b > T21.3.0 error missing halfwords
('.hword2', [], -2103), # T21.0.2 > T21.1.2c error: missing space after directive
('.hword 2000', [2, 2000], 1000), # T21.0.2 > T21.1.2b > T21.3.1a success: capture a halfword
('.hword 2000, 0b0010', [2, 2000, 2], 1000), # T21.0.2 > T21.1.2b > T21.3.1b > T21.3.1a two halfwords
('.hword 02000, -1, 0xF00A', [2, 1024, 65535, 61450], 1000), # success: three halfwords
            ('.hword \'e\'', [2], -2106),               # T21.0.2 > T21.1.2b > T21.3.2 unrecognizable info
('.hword 045r', [2], -1003), # T21.0.2 > T21.1.2b > T21.3.1a + override unexpected hexa digit
('.hword 45,', [2, 45], -2102), # T21.0.2 > T21.1.2b > T21.3.1b > T21.3.0 error: missing data
('.hword 2 , -0123 ', [2, 2, 0xFF85], 1000), # T21.0.2 > T21.1.2b > T21.3.1b > T21.3.1a extra space
            ('.hword -45000', [2], -2107),              # T21.0.2 > T21.1.2b > T21.3.1a + override error: data < -2**15
('.word', [], -2102), # T21.0.2 > T21.1.3a error: missing data values
('.word ', [4], -2102), # T21.0.2 > T21.1.3b > T21.4.0 error missing words
('.wordh', [], -2103), # T21.0.2 > T21.1.3c error: missing space after directive
('.word 2000', [4, 2000], 1000), # T21.0.2 > T21.1.3b > T21.4.1a success: capture a word
('.word -2147483648, 0b0010', [4, 2147483648, 0b0010], 1000), # T21.0.2 > T21.1.3b > T21.4.1b > T21.4.1a
('.word 020000000, -1, 0x1F00A', [4, 0o20000000, 4294967295, 0x1F00A], 1000), # three words
            ('.word r45', [4], -2106),                  # T21.0.2 > T21.1.3b > T21.4.2 unrecognizable info
('.word 0b45', [4], -1002), # T21.0.2 > T21.1.3b > T21.4.1a + override unexpected binary digit
('.word 0x4X5', [4], -1005), # T21.0.2 > T21.1.3b > T21.4.1a + override unexpected hexa digit
('.word 0x400000000', [4], -1006), # T21.0.2 > T21.1.3b > T21.4.1a + override too long value (>2^32)
('.word 45,', [4, 45], -2102), # T21.0.2 > T21.1.3b > T21.4.1b > T21.4.0 error: missing data
('.word 2 , -0123 ', [4, 2, 4294967173], 1000), # T21.0.2 > T21.1.3b > T21.4.1b > T21.4.1a
('.word 4294967295', [4, 4294967295], 1000), # T21.0.2 > T21.1.3b > T21.4.1a success: maximum int
('.ascii', [], -2102), # T21.0.2 > T21.1.4a error: missing string
('.asciz', [], -2102), # T21.0.2 > T21.1.5a error: missing string
('.ascii ', [1], -2102), # T21.0.2 > T21.1.4b > T21.5.0 : missing string
('.asciz ', [1], -2102), # T21.0.2 > T21.1.5b > T21.6.0 : missing string
('.ascii5', [], -2103), # T21.0.2 > T21.1.4c error: missing space after directive
('.asciz8', [], -2103), # T21.0.2 > T21.1.5c error: missing space after directive
('.ascii \' \'', [1, 32], 1000), # T21.0.2 > T21.1.4b > T21.5.1a success: get one char
('.asciz \' \'', [1, 32, 0], 1000), # T21.0.2 > T21.1.5b > T21.6.1a success: get one char + '\0'
('.ascii \'a\', \'b\' ,\'c\' , \'d\' ', [1, 97, 98, 99, 100], 1000), # > T21.5.1b > T21.5.1a
('.asciz \'a\', \'b\' ,\'c\' , \'d\' ', [1, 97, 0, 98, 0, 99, 0, 100, 0], 1000), # > T21.6.1b > T21.6.1a
('.ascii "0123456789"', [1, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57], 1000), # T21.0.2 > T21.1.4b > T21.5.2a
('.asciz "abcdef"', [1, 97, 98, 99, 100, 101, 102, 0], 1000), # T21.0.2 > T21.1.5b > T21.6.2a
('.ascii \"b\", \"a\"', [1, 98, 97], 1000), # T21.0.2 > T21.1.4b > T21.5.2b > T21.5.2a
('.asciz \"a\", \"b\"', [1, 97, 0, 98, 0], 1000), # T21.0.2 > T21.1.5b > T21.6.2b > T21.6.2a
('.ascii \"b\", \'a\'', [1, 98, 97], 1000), # T21.0.2 > T21.1.4b > T21.5.2b > T21.5.1a
('.asciz \'a\', \"b\"', [1, 97, 0, 98, 0], 1000), # T21.0.2 > T21.1.5b > T21.6.1b > T21.6.2a
('.ascii \' ', [1], -1104), # T21.0.2 > T21.1.4b > T21.5.1a + override unclosed char
('.ascii \" ', [1], -1204), # T21.0.2 > T21.1.4b > T21.5.2a + override unclosed string
('.asciz \' ', [1], -1104), # T21.0.2 > T21.1.5b > T21.6.1a + override unclosed char
('.asciz \" ', [1], -1204), # T21.0.2 > T21.1.5b > T21.6.2a + override unclosed string
('.ascii \'\'', [1], -1102), # T21.0.2 > T21.1.4b > T21.5.1a + override empty char
('.ascii \"\"', [1], -1202), # T21.0.2 > T21.1.4b > T21.5.2a + override empty string
('.asciz \'\'', [1], -1102), # T21.0.2 > T21.1.5b > T21.6.1a + override empty char
('.asciz \"\"', [1], -1202), # T21.0.2 > T21.1.5b > T21.6.2a + override empty string
('.ascii \' 0\'', [1], -1105), # T21.0.2 > T21.1.4b > T21.5.2a + override more than one character
('.asciz \' 0\'', [1], -1105), # T21.0.2 > T21.1.5b > T21.6.2a + override idem after .ascii
('.ascii \'a\', \"bc , \'d\"', [1, 97, 98, 99, 32, 44, 32, 39, 100], 1000), # > T21.5.1b > T21.5.2a
('.asciz \',\', \",,\"', [1, 44, 0, 44, 44, 0], 1000), # T21.0.2 > T21.1.5b > T21.6.1a success capture ','
            ('.ascii \'\t\'', [1], -1103),              # T21.0.2 > T21.1.4b > T21.5.1c + override illegal character '\t'
            ('.asciz \'\t\'', [1], -1103),              # T21.0.2 > T21.1.5b > T21.6.1c + override idem after .ascii
            ('.ascii \"\t\"', [1], -1203),              # T21.0.2 > T21.1.4b > T21.5.2c + override illegal character "\t"
('.asciz \" \t\"', [1], -1203), # T21.0.2 > T21.1.5b > T21.6.2c + override idem after valid char
('.ascii \'"\'a', [1], -2105), # T21.0.2 > T21.1.4b > T21.5.1c unexpected separator
('.ascii \"\'a\"b', [1], -2105), # T21.0.2 > T21.1.4b > T21.5.2c unexpected separator
('.asciz \'"\'a', [1], -2105), # T21.0.2 > T21.1.5b > T21.6.1c unexpected separator
('.asciz \"\'a\"b', [1], -2105), # T21.0.2 > T21.1.5b > T21.6.2c unexpected separator
('.ascii \' a\'', [1], -1105), # T21.0.2 > T21.1.4b > T21.5.2a + override more than one character
('.asciz \' a\'', [1], -1105), # T21.0.2 > T21.1.5b > T21.6.2a + override idem after .ascii
('.ascii a\'', [1], -2106), # T21.0.2 > T21.1.4b > T21.5.3 non recognizable info
('.asciz a\'', [1], -2106), # T21.0.2 > T21.1.5b > T21.6.3 non recognizable info
(' .asciz \'a\'', [1, 97, 0], 1000) # T21.0.1 > T21.0.2 > T21.1.5b > T21.6.1a success with leading space
]
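# In dat_test, negative literals are stored as the unsigned two's-complement
# value of the directive's width ('.byte -20' -> 236, '.hword -1' -> 65535,
# '.word -2147483648' -> 2147483648).  A sketch of that wrap-around (the
# helper name is ours, not part of the analyzers):
def to_unsigned(value, bits):
    """Wrap a signed value into its unsigned two's-complement form."""
    return value & ((1 << bits) - 1)

assert to_unsigned(-20, 8) == 236
assert to_unsigned(-1, 16) == 65535
assert to_unsigned(-2147483648, 32) == 2147483648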
adr_test = [('', [], -2001), # T20.0.0 error: missing address
(' ', [], -2001), # T20.0.1 > T20.0.0 idem white leading space
('0x', [], -2002), # T20.0.2 > T20.1.0 error: '0x' but missing hex digits
('x0', [], -2001), # T20.0.3 error: missing address start
(' 0x8001', [], -2003), # T20.0.1 > T20.0.2 > T20.1.1a address but missing trailing space
('0xF3AE ', [0xF3AE], 1000), # T20.0.2 > T20.0.2 > T20.1.1b success address with trailing space
('0xR124', [], -2003), # T20.0.2 > T20.1.2 illegal address (first digit)
('0x51V4', [], -2003), # T20.0.2 > T20.1.1c illegal address (in-the-middle)
('0x514W', [], -2003), # T20.0.2 > T20.1.1c illegal address (last one)
('0xF0002E00 ', [0xF0002E00], 1000), # T20.0.2 > T20.1.1b big hex address: eight digits
('0x10002EF00 ', [], -2004) # T20.0.2 > T20.1.1b + override long hex address (> 2^32)
]
reg_test = [('', [], -1301), # T13.0.0 error: missing register
(' ', [], -1301), # T13.0.1 > T13.0.0 / idem with leading space
('1', [], -1302), # T13.0.4 error: unknown register identifier
('r', [], -1303), # T13.0.2 > T13.1.0 error: missing register number
('ra', [], -1304), # T13.0.2 > T13.1.2 error: wrong reg number
('r1a', [], -1304), # T13.0.2 > T13.1.2 error: wrong reg number
('r-1', [], -1304), # T13.0.2 > T13.1.1 + override : negative reg number
('r16', [], -1304), # T13.0.2 > T13.1.1 + override : too high reg number
('r12', [12], 1000), # T13.0.2 > T13.1.1 success: two digit reg number
('r0', [0], 1000), # T13.0.2 > T13.1.1 success: one digit reg number
('sp', [13], 1000), # T13.0.3 success: stack pointer
('lr', [14], 1000), # T13.0.3 success: link register
('pc', [15], 1000) # T13.0.3 success: program counter
]
rbt_test = [('', [], -1401), # T14.0.0 error: missing register
(' ', [], -1401), # T14.0.1 > T14.0.0 / idem with leading space
('1', [], -1302), # T14.0.2c + override unknown register identifier
('r', [], -1303), # T14.0.2a + override missing register number
('ra', [], -1304), # T14.0.2a + override wrong reg number
('r1a', [], -1304), # T14.0.2c + override wrong reg number
('r-1', [], -1303), # T14.0.2b + override negative reg number
('r16', [], -1304), # T14.0.2a + override too high reg number
('r0', [0x1], 1000), # T14.0.2a success: single register
('r15', [0x8000], 1000), # T14.0.2a : maximum single reg value
('r0-r5', [0x3F], 1000), # T14.0.2b > T14.1.1 success: reg range (min, max)
('r12-r2', [0x1FFC], 1000), # T14.0.2b > T14.1.1 : (max, min)
('lr-pc', [0xC000], 1000), # T14.0.2b > T14.1.1 : (symbolic)
('sp-r12', [0x3000], 1000), # T14.0.2b > T14.1.1 : (symbolic & numeric, two bits)
('sp-r13', [0x2000], 1000), # T14.0.2b > T14.1.1 : (symbolic & numeric, one bit)
('r4-', [0x10], -1403), # T14.0.2b > T14.1.0 error: missing second reg in range list
('r8-1', [0x100], -1302), # T14.0.2a > T14.1.1 + override wrong second reg
('r9-r16', [0x200], -1304) # T14.0.2a > T14.1.1 + override too high second reg number
]
rlt_test = [('', [], -1501), # T15.0.0 error: missing register list
(' ', [], -1501), # T15.0.1 > T15.0.0 : idem with leading space
('1', [], -1502), # T15.0.3 error: missing '{'
('{', [], -1503), # T15.0.2 > T15.1.0 error: missing registers
('{1', [], -1302), # T15.0.2 > T15.1.1a + override : unknown register identifier
('{r', [], -1303), # T15.0.2 > T15.1.1a + override : missing register number
('{ra', [], -1304), # T15.0.2 > T15.1.1a + override : wrong reg number
('{r1a', [], -1304), # T15.0.2 > T15.1.1a + override : wrong reg number
('{r-1', [], -1303), # T15.0.2 > T15.1.1a + override : negative reg number
('{r16', [], -1304), # T15.0.2 > T15.1.1a + override : too high reg number
('{r0', [], -1503), # T15.0.2 > T15.1.1a error: unclosed single register
('{r0}', [0x1], 1000), # T15.0.2 > T15.1.1c success: single register
('{r0-r5}', [0x3F], 1000), # T15.0.2 > T15.1.1c success: single range
('{r0-r5 }', [0x3F], 1000), # : idem with trailing space
('{r12-r2, lr', [0x1FFC], -1503), # > T15.1.1b > T15.1.1a error: missing '}' after list
('{r12 - r2, lr}', [0x5FFC], 1000), # > T15.1.1b > T15.1.1c success: range + single register
('{ pc, r1 -r2, sp- r12, r5}', [0xB026], 1000), # : several ranges, with spaces
('{r4-}', [], -1403), # > T15.1.1a + override : missing second reg in range list
('{r14, r8-1', [0x4000], -1302), # > T15.1.1a + override : wrong second reg
('{r9-r16, r13}', [], -1304), # > T15.1.1a + override : too high second reg number
('{r14,r8}', [0x4100], 1000), # success: no space after ','
('{ r9 , r13 }', [0x2200], 1000), # success: extra spaces
('{r14,}', [0x4000], -1504), # > T15.1.1b > T15.1.2 error: missing register after ','
('{r14, }', [0x4000], -1504), # > T15.1.1b > T15.1.2 : missing register after ', '
('{r9-r15, sp13}', [0xFE00], -1402) # > T15.1.1b + override : unrecognized register id
]
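# The expected values in rbt_test/rlt_test are 16-bit LDM/STM masks with bit
# i set for register ri; a range sets every bit between its endpoints in
# either order ('r12-r2' -> 0x1FFC).  A sketch with an illustrative helper:
def reg_mask(*regs):
    """Build a register-list mask from register numbers or (a, b) ranges."""
    mask = 0
    for r in regs:
        lo, hi = (min(r), max(r)) if isinstance(r, tuple) else (r, r)
        mask |= ((1 << (hi - lo + 1)) - 1) << lo
    return mask

assert reg_mask((0, 5)) == 0x3F                     # '{r0-r5}'
assert reg_mask((12, 2)) == 0x1FFC                  # '{r12-r2}'
assert reg_mask(15, (1, 2), (13, 12), 5) == 0xB026  # '{ pc, r1 -r2, sp- r12, r5}'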
imo_test = [('', [], -1601), # T16.0.0 error: missing immediate value
(' ', [], -1601), # T16.0.1 > T16.0.0 idem with leading space
('2', [], -1602), # T16.0.3 error: missing '#'
('#', [], -1603), # T16.0.2 > T16.1.0 error: missing value after '#'
('# ', [], -1604), # T16.0.2 > T16.1.1 error: unexpected space after '#'
('#f', [], -1605), # T16.0.2 > T16.1.4 error: unrecognizable info after '#'
('#20', [20], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success: simple byte value
('#\'f\'', [102], 1000), # T16.0.2 > T16.1.3 > T16.2.0 success: simple char value
('#-20', [], -1606), # T16.0.2 > T16.1.2 + override : impossible fixup for negative number
('#2000', [0xE7D], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success fixup: in-the-middle bits
('#0xC0000034', [0x1D3], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success fixup: split bits
('#0xFF000000', [0x4FF], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success fixup: maximum rotation
('#0xFF0000FF', [], -1606), # T16.0.2 > T16.1.2 + override : impossible fixup for 16 bits
('#0x102', [], -1606), # T16.0.2 > T16.1.2 + override : impossible fixup for odd rotations
('#0x104', [0xF41], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success fixup: odd immediate mask
('#0x108', [0xF42], 1000), # T16.0.2 > T16.1.2 > T16.2.0 : even immediate mask
('#45r', [], -1004), # T16.0.2 > T16.1.2 + override : unexpected decimal digit
('#\'e\' c', [101], -1607), # T16.0.2 > T16.1.3 > T16.2.1 error: unexpected text after imm val.
('#0b111111100000000000', [0xBFE], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success fixup: binary
('#0b1002000', [], -1002), # T16.0.2 > T16.1.2 + override : invalid binary digit
('#012000000005', [0x255], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success fixup: octal
('#012000900005', [], -1003), # T16.0.2 > T16.1.2 + override : invalid octal digit
('#45d', [], -1004), # T16.0.2 > T16.1.2 + override : invalid decimal digit
('#0x4X5', [], -1005), # T16.0.2 > T16.1.2 + override : invalid hexa digit
('#0x400000000', [], -1006), # T16.0.2 > T16.1.2 + override : too long value (>2^32)
('#0x08000002', [0x382], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success fixup: MSB = 1 at IM
('#\'', [], -1605), # T16.0.2 > T16.1.4 error: unclosed char
('#\' ', [], -1104), # T16.0.2 > T16.1.3 + override : unclosed char
('#\'\'', [], -1102), # T16.0.2 > T16.1.3 + override : empty char
('#\' 0\'', [], -1105), # T16.0.2 > T16.1.3 + override : more than one character
            ('#\'\t\'', [], -1103),            # T16.0.2 > T16.1.3 + override : illegal character '\t'
('#\"t\"', [], -1605), # T16.0.2 > T16.1.4 error: illegal character '"'
(' #\'a\'', [97], 1000) # T16.0.1 > T16.0.2 > T16.1.3 > T16.2.0 success with leading space
]
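# The 'fixup' comments in imo_test refer to the ARM data-processing
# immediate: an 8-bit value rotated right by twice a 4-bit rotate field,
# encoded as rot << 8 | imm8.  Values that would need an odd rotation
# (e.g. 0x102) are not encodable.  Sketch (function names are ours):
def ror32(value, amount):
    """Rotate a 32-bit value right by `amount` bits."""
    amount &= 31
    return ((value >> amount) | (value << (32 - amount))) & 0xFFFFFFFF

def encode_imm12(value):
    """Return rot << 8 | imm8 for an encodable value, else None."""
    value &= 0xFFFFFFFF
    for rot in range(16):
        imm8 = ror32(value, 32 - 2 * rot)  # undo the rotation: rotate left by 2*rot
        if imm8 <= 0xFF:
            return (rot << 8) | imm8
    return None

assert encode_imm12(2000) == 0xE7D        # '#2000' -> in-the-middle bits
assert encode_imm12(0xC0000034) == 0x1D3  # split bits
assert encode_imm12(0xFF000000) == 0x4FF  # maximum rotation
assert encode_imm12(0x102) is None        # odd rotation -> rejected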
ims_test = [('', [], -1701), # T17.0.0 error: missing immediate value
(' ', [], -1701), # T17.0.1 > T17.0.0 idem with leading space
('2', [], -1702), # T17.0.3 error: missing '#'
('#', [], -1703), # T17.0.2 > T17.1.0 error: missing value after '#'
('# ', [], -1704), # T17.0.2 > T17.1.1 error: unexpected space after '#'
('#f', [], -1705), # T17.0.2 > T17.1.3 error: unrecognizable info after '#'
('#2', [2], 1000), # T17.0.2 > T17.1.2 > T17.2.0 success: valid number of shifts
('#-20', [], -1706), # T17.0.2 > T17.1.2 + override : negative number of shifts
('#040', [], -1706), # T17.0.2 > T17.1.2 + override : too high number of shifts
('#0x1C', [28], 1000), # T17.0.2 > T17.1.2 > T17.2.0 success: hexa number
('#0b10101', [21], 1000), # T17.0.2 > T17.1.2 > T17.2.0 success: binary number
('#0b10020', [], -1002), # T17.0.2 > T17.1.2 + override : invalid binary digit
('#019', [], -1003), # T17.0.2 > T17.1.2 + override : invalid octal digit
('#4d', [], -1004), # T17.0.2 > T17.1.2 + override : invalid decimal digit
('#0xX', [], -1005), # T17.0.2 > T17.1.2 + override : invalid hexa digit
(' #0x1F', [31], 1000) # T17.0.1 > T17.0.2 > T17.1.2 > T17.2.0 success with leading space
]
op2_test = [('', [], -2201), # T22.0.0 error: missing second operand
(' ', [], -2203), # T22.0.3 idem with leading space
('2', [], -2203), # T22.0.3 error: missing '#'
('#', [], -1603), # T22.0.1 + override : missing value after '#'
('# ', [], -1604), # T22.0.1 + override : unexpected space after '#'
('#f', [], -1605), # T22.0.1 + override : unrecognizable info after '#'
('#20', [0x02000014], 1000), # T22.0.1 success: simple byte value
('#\'f\'', [0x02000066], 1000), # T22.0.1 success: simple char value
('#-20', [], -1606), # T22.0.1 + override : impossible fixup for negative number
('#0xC0000034', [0x020001D3], 1000), # T22.0.1 success fixup: split bits
('#0x102', [], -1606), # T22.0.1 + override : impossible fixup for odd rotations
('#\'e\' c', [], -1607), # T22.0.1 + override : unexpected text after imm val.
('#0b1002000', [], -1002), # T22.0.1 + override : invalid binary digit
('#012000900005', [], -1003), # T22.0.1 + override : invalid octal digit
('#45d', [], -1004), # T22.0.1 + override : invalid decimal digit
('#0x4X5', [], -1005), # T22.0.1 + override : invalid hexa digit
            ('#0x400000000', [], -1006),         # T22.0.1 + override : too long value (>2^32)
('#\'', [], -1605), # T22.0.1 + override : unclosed char
('#\' ', [], -1104), # T22.0.1 + override : unclosed char
('#\'\'', [], -1102), # T22.0.1 + override : empty char
('#\' 0\'', [], -1105), # T22.0.1 + override : more than one character
            ('#\'\t\'', [], -1103),              # T22.0.1 + override : illegal character '\t'
('#\"t\"', [], -1605), # T22.0.1 + override : illegal character '"'
(' #\'a\'', [0x02000061], 1000), # T22.0.1 success with leading space
('r', [], -1303), # T22.0.2a + override : missing register number
('ra', [], -1304), # T22.0.2a + override : wrong reg number
('r1a', [], -1304), # T22.0.2a + override : wrong reg number
('r-1', [], -1304), # T22.0.2a + override : negative reg number
('r16', [], -1304), # T22.0.2a + override : too high reg number
('r12', [12], 1000), # T22.0.2a success: single reg
('r0 ', [0], 1000), # T22.0.2a success: single reg with trailing space
(' sp', [13], 1000), # T22.0.2a success: single reg with leading space
('r1,', [1], -2204), # T22.0.2b > T22.1.0 error: missing shift mode
('r2, ', [2], -2204), # T22.0.2b > T22.1.1 > T22.1.0 : idem with trailing space
            ('r3, lslx', [3], -2206),            # T22.0.2b > T22.1.1 > T22.1.2c : unrecognized shift mode
            ('r3, r0', [3], -2206),              # T22.0.2b > T22.1.1 > T22.1.2c : unrecognized shift mode
            ('r3, #0', [3], -2206),              # T22.0.2b > T22.1.1 > T22.1.2c : unrecognized shift mode
('r4, xl', [4], -2206), # T22.0.2b > T22.1.1 > T22.1.3 : unrecognized shift mode
('r5, lsl', [5], -2205), # T22.0.2b > T22.1.1 > T22.1.2a : missing space after shift mode
('r6, lsr ', [6], -2205), # > T22.1.2b > T22.2.0 : missing info after shift mode
('r7, asr x', [7], -2207), # > T22.1.2b > T22.2.3 : wrong info after shift mode
('r8, ror r', [8], -1303), # > T22.1.2b > T22.2.1 + override: missing register number
('r9, lsl ra', [9], -1304), # > T22.1.2b > T22.2.1 + override: wrong reg number
('r10, lsr r1a', [10], -1304), # > T22.1.2b > T22.2.1 + override: wrong reg number
('r11, asr r-1', [11], -1304), # > T22.1.2b > T22.2.1 + override: negative reg number
('r12, ror r16', [12], -1304), # > T22.1.2b > T22.2.1 + override: too high reg number
('r13, lsl r12', [0xC1D], 1000), # > T22.1.2b > T22.2.1 success: LSL reg
('sp, lsr r0 ', [0x3D], 1000), # > T22.1.2b > T22.2.1 : LSR reg with trailing space
('r1,asr lr', [0xE51], 1000), # > T22.1.2b > T22.2.1 : ASR reg no space after ','
('r8, ror #', [8], -1703), # > T22.1.2b > T22.2.2 + override: missing value after '#'
('r9, lsl # ', [9], -1704), # > T22.1.2b > T22.2.2 + override: unexpected space after '#'
('r10, lsr #f', [10], -1705), # > T22.1.2b > T22.2.2 + override: unrecognizable info after '#'
('r11, asr #2', [0x14B], 1000), # > T22.1.2b > T22.2.2 success: valid number of shifts
('r12, ror #-20', [12], -1706), # > T22.1.2b > T22.2.2 + override: negative number of shifts
('r13, lsl #040', [13], -1706), # > T22.1.2b > T22.2.2 + override: too high number of shifts
('pc, lsr #0x1C ', [0xE2F], 1000), # > T22.1.2b > T22.2.2 success LSR imm with trailing space
('r1,asr #0b10101', [0xAC1], 1000), # > T22.1.2b > T22.2.2 : ASR bin imm, no space after ','
('r8, ror #0b10020', [8], -1002), # > T22.1.2b > T22.2.2 + override: invalid binary digit
('r9, lsl #019', [9], -1003), # > T22.1.2b > T22.2.2 + override: invalid octal digit
('r10, lsr #4d', [10], -1004), # > T22.1.2b > T22.2.2 + override: invalid decimal digit
('r11, asr #0xX', [11], -1005), # > T22.1.2b > T22.2.2 + override: invalid hexa digit
(' r12 , ror #0x1F ', [0xFEC], 1000), # > T22.1.2b > T22.2.2 success with lead/trail spaces
('r13, lsl r12 a', [13], -1304), # > T22.1.2b > T22.2.1 + override: unexpected text after parse
('r12, ror #0x1F b', [12], -1005) # > T22.1.2b > T22.2.2 + override: idem for immediate parsing
]
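# The op2 encodings above follow the ARM shifted-register layout:
#   shift by register  -> Rs << 8 | type << 5 | 1 << 4 | Rm
#   shift by immediate -> imm5 << 7 | type << 5 | Rm
# with type 00=lsl, 01=lsr, 10=asr, 11=ror.  Illustrative helper (ours):
SHIFT_TYPES = {'lsl': 0, 'lsr': 1, 'asr': 2, 'ror': 3}

def op2_reg(rm, shift=None, rs=None, imm5=None):
    """Encode a (possibly shifted) register second operand."""
    if shift is None:
        return rm  # plain register, an implicit 'lsl #0'
    if rs is not None:
        return (rs << 8) | (SHIFT_TYPES[shift] << 5) | (1 << 4) | rm
    return (imm5 << 7) | (SHIFT_TYPES[shift] << 5) | rm

assert op2_reg(13, 'lsl', rs=12) == 0xC1D    # 'r13, lsl r12'
assert op2_reg(11, 'asr', imm5=2) == 0x14B   # 'r11, asr #2'
assert op2_reg(12, 'ror', imm5=31) == 0xFEC  # 'r12, ror #0x1F'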
opd_test = [('', [], -2301), # T23.0.0 error: missing operands
(' ', [], -2303), # T23.0.2 error: idem with leading space
('2', [], -1302), # T23.0.1a + override : unrecognizable register
('2,', [], -1302), # T23.0.1b + override : unrecognizable operand with ','
('r', [], -1303), # T23.0.1a + override : missing register number
('ra', [], -1304), # T23.0.1a + override : wrong reg number
('r16', [], -1304), # T23.0.1a + override : too high reg number
('r12', [], -2302), # T23.0.1a error: good dest reg, missing other ops
('r0 ', [], -2302), # T23.0.1a error: missing ',' after dest reg
('r1,', [0x1000], -2304), # T23.0.1b > T23.1.0 error: missing source operands
('r2, ', [0x2000], -2306), # T23.0.1b > T23.1.3 error: missing source operands
('r3, 3', [0x3000], -2306), # T23.0.1b > T23.1.3 error: wrong source op 1
('r4, ra', [0x4000], -1304), # T23.0.1b > T23.1.1 > T23.1.2a + override : wrong reg number
('r5, r1a', [0x5000], -1304), # T23.0.1b > T23.1.1 > T23.1.2a + override : wrong reg number
('r6, r-1', [0x6000], -1304), # T23.0.1b > T23.1.1 > T23.1.2a + override : negative reg number
('r7, r16', [0x7000], -1304), # T23.0.1b > T23.1.1 > T23.1.2a + override : too high reg number
('r8, r12', [0x8800C], 1000), # T23.0.1b > T23.1.1 success: two registers
('r9,r1 ', [0x99001], 1000), # T23.0.1b > T23.1.1 success: idem with no space after ','
(' sp , lr ', [0xDD00E], 1000), # T23.0.1b > T23.1.1 success: idem with extra spaces
('r10, r1,', [0x0A000], -2204), # T23.0.1b > T23.1.1 + override : missing shift register
('r11, r2, ', [0x0B000], -2204), # T23.0.1b > T23.1.1 + override : idem with space
('r12, r3, 3', [0x3C000], -2308), # T23.0.1b > T23.1.2b > T23.2.2 error: wrong op 2
('r13, r4, ra', [0x4D000], -1304), # T23.0.1b > T23.1.2b > T23.2.1 + override : wrong reg number
('r14, r5, r1a', [0x5E000], -1304), # T23.0.1b > T23.1.2b > T23.2.1 + override : wrong reg number
('r15, r6, r-1', [0x6F000], -1304), # T23.0.1b > T23.1.2b > T23.2.1 + override : negative reg number
('r0, r7, r16', [0x70000], -1304), # T23.0.1b > T23.1.2b > T23.2.1 + override : too high reg number
('r1, r8, r12', [0x8100C], 1000), # T23.0.1b > T23.1.2b > T23.2.1 success: three registers
('r2,r9,r1 ', [0x92001], 1000), # T23.0.1b > T23.1.2a : idem with no space after ','
('r3, #', [0x03000], -1603), # T23.0.1b > T23.1.1 + override : missing value after '#'
('r4, # ', [0x04000], -1604), # T23.0.1b > T23.1.1 + override : unexpected space after '#'
('r5, #f', [0x05000], -1605), # T23.0.1b > T23.1.1 + override : unrecognizable info after '#'
('r6, #20', [0x02066014], 1000), # T23.0.1b > T23.1.1 success: dest reg + immediate value
('r7, #\'f\'', [0x02077066], 1000), # T23.0.1b > T23.1.1 success: dest reg + immediate char
('r8, #-20', [0x08000], -1606), # T23.0.1b > T23.1.1 + override : impossible fixup for negative num.
('r9,#0xC0000034', [0x020991D3], 1000), # T23.0.1b > T23.1.1 success fixup: split bits
('r10, #0x102', [0x0A000], -1606), # T23.0.1b > T23.1.1 + override : impossible fixup for odd rotations
('r11, #\'e\' c', [0xB000], -1607), # T23.0.1b > T23.1.1 + override : unexpected text after imm val.
('r12, #0b1002000', [0x0C000], -1002), # T23.0.1b > T23.1.1 + override : invalid binary digit
('r13, #012000900005', [0x0D000], -1003), # > T23.1.1 + override : invalid octal digit
('r14, #45d', [0x0E000], -1004), # T23.0.1b > T23.1.1 + override : invalid decimal digit
('r15, #0x4X5', [0x0F000], -1005), # T23.0.1b > T23.1.1 + override : invalid hexa digit
('r0, #\'', [0x0], -1605), # T23.0.1b > T23.1.1 + override : unclosed char
('r1, #\' ', [0x01000], -1104), # T23.0.1b > T23.1.1 + override : unclosed char
('r2, #\'\'', [0x02000], -1102), # T23.0.1b > T23.1.1 + override : empty char
('r3, #\' 0\'', [0x03000], -1105), # T23.0.1b > T23.1.1 + override : more than one character
            ('r4, #\'\t\'', [0x04000], -1103),        # T23.0.1b > T23.1.1 + override : illegal character '\t'
('r5, lslx', [0x05000], -2306), # T23.0.1b > T23.1.3 error: unrecognized source operand
('r5, r10, lslx', [0xA5000], -2308), # T23.0.1b > T23.1.2b > T23.2.2 error: wrong second operand
('r5, r10, r1', [0xA5001], 1000), # T23.0.1b > T23.1.2b > T23.2.1 success: three registers
('r5, r10, #2', [0x20A5002], 1000), # T23.0.1b > T23.1.2b > T23.2.1 success: two regs, one immediate
('r6, r1, xl', [0x16000], -2308), # T23.0.1b > T23.1.2b > T23.2.2 error: wrong second operand
('r7, r2, lsl', [0x07000], -2205), # T23.0.1b > T23.1.1 + override : missing space after shift mode
('r8, r3, lsr ', [0x08000], -2205), # T23.0.1b > T23.1.1 + override : missing info after shift mode
('r9, r4, asr x', [0x09000], -2207), # T23.0.1b > T23.1.1 + override : wrong info after shift mode
('r10, r5, ror r', [0x0A000], -1303), # T23.0.1b > T23.1.1 + override : missing register number
('r11, r6, lsl ra', [0x0B000], -1304), # T23.0.1b > T23.1.1 + override : wrong reg number
('r12, r7, ror r16', [0x0C000], -1304), # T23.0.1b > T23.1.1 + override : too high reg number
('r13, r8, lsl r12', [0xDDC18], 1000), # T23.0.1b > T23.1.1 success: LSL reg
('r14, sp, lsr r0 ', [0xEE03D], 1000), # T23.0.1b > T23.1.1 : LSR reg with trailing space
('r15, r1,asr lr', [0xFFE51], 1000), # T23.0.1b > T23.1.1 : ASR reg no space after ','
('r0, r8, ror #', [0], -1703), # T23.0.1b > T23.1.1 + override : missing value after '#'
('r1, r9, lsl # ', [0x01000], -1704), # T23.0.1b > T23.1.1 + override : unexpected space after '#'
('r2, r10, lsr #f', [0x02000], -1705), # T23.0.1b > T23.1.1 + override : unrecognizable info after '#'
('r3, r11, asr #2', [0x3314B], 1000), # T23.0.1b > T23.1.1 success: valid number of shifts
('r4, r12, ror #-20', [0x04000], -1706), # > T23.1.1 + override : negative number of shifts
('r5, r13, lsl #040', [0x05000], -1706), # > T23.1.1 + override : too high number of shifts
('r5, r13, lsl #00', [0x05500D], 1000), # > T23.1.1 success: true LSL #0
('r6, pc, lsr #0x1C ', [0x66E2F], 1000), # > T23.1.1 success LSR imm with trailing space
('r6, pc, lsr #0x0 ', [0x6600F], 1000), # > T23.1.1 converting LSR #0 into LSL #0
('r7,r1,asr #0b10101', [0x77AC1], 1000), # > T23.1.1 : ASR bin imm, no space after ','
('r7,r1,asr #0b0', [0x77001], 1000), # > T23.1.1 converting ASR #0 into LSL #0
('r8, r13, lsl r12 a', [0x08000], -1304), # > T23.1.1 + override : unexpected text after parse
('r9, r12, ror #0x1F b', [0x09000], -1005), # > T23.1.1 + override : idem for immediate parsing
('r9, r12, ror #0x1F', [0x99FEC], 1000), # > T23.1.1 success ROR with 31 shifts
('r9, r12, ror #0x0', [0x9906C], 1000), # > T23.1.1 coding ROR #0 as RRX
('r13, r7, r8, lsl r12 ', [0x7DC18], 1000), # > T23.1.2 > T23.2.1 success: three regs, last shift reg
('r14 , r8 , sp , lsr r10', [0x8EA3D], 1000), # > T23.1.2 > T23.2.1 : idem with trailing spaces
('r15,r9,r1,asr lr', [0x9FE51], 1000), # > T23.1.2 > T23.2.1 : idem with space after ','
('r13, r7, r8, lsl #12 ', [0x7D608], 1000), # > T23.1.2 > T23.2.1 success: three regs, last shift imm
('r14 , r8 , sp , lsr #10', [0x8E52D], 1000), # > T23.1.2 > T23.2.1 : idem with trailing spaces
('r15,r9,r1,asr #31', [0x9FFC1], 1000), # > T23.1.2 > T23.2.1 : idem with space after ','
('r15,r9,r1,asr r32', [0x9F000], -1304), # > T23.1.2 > T23.2.1 + override : wrong range reg number
('r15,r9,r1,asr #32', [0x9F000], -1706), # > T23.1.2 > T23.2.1 + override : invalid number of shifts
('r15,r9,r1,asr r', [0x9F000], -1303), # > T23.1.2 > T23.2.1 + override : missing reg number
('r15,r9,r1,asr ', [0x9F000], -2205) # > T23.1.2 > T23.2.1 + override : missing info after shift
]
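# opd_test composes the operand fields of a data-processing word:
# (I << 25) | Rn << 16 | Rd << 12 | op2, where a two-operand form reuses Rd
# as Rn ('r8, r12' -> 0x8800C).  Illustrative helper (ours):
def opdat_fields(rd, rn, op2, imm=False):
    """Pack destination, first operand and op2 into the low 26 bits."""
    return (0x02000000 if imm else 0) | (rn << 16) | (rd << 12) | op2

assert opdat_fields(1, 8, 12) == 0x8100C                  # 'r1, r8, r12'
assert opdat_fields(8, 8, 12) == 0x8800C                  # 'r8, r12' (Rn = Rd)
assert opdat_fields(6, 6, 0x014, imm=True) == 0x02066014  # 'r6, #20'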
idt_test = [('', [], -3101), # T31.0.0 error: missing data instruction
(' ', [], -3101), # T31.0.1 > T31.0.0 error: idem with leading space
('2', [], -3103), # T31.0.3 error: unrecognizable instruction
('and', [], -3102), # T31.0.2a error: missing operands after instr.
('eor ', [4, 0xE0200000], -3102), # T31.0.2b > T31.3.0 error: missing operands after instr.
('sub 2,', [4, 0xE0400000], -1302), # T31.0.2b > T31.3.1 + override : unrecognizable operand with ','
('rsb r', [4, 0xE0600000], -1303), # T31.0.2b > T31.3.1 + override : missing register number
('add r16', [4, 0xE0800000], -1304), # T31.0.2b > T31.3.1 + override : too high reg number
('adc r12', [4, 0xE0A00000], -2302), # T31.0.2b > T31.3.1 + override : good dest reg, missing other ops
('sbc ', [4, 0xE0C00000], -2303), # T31.0.2b > T31.3.1 + override : missing dest reg
('rsc r1,', [4, 0xE0E00000], -2304), # T31.0.2b > T31.3.1 + override : missing source operands
('orr r2, ', [4, 0xE1800000], -2306), # T31.0.2b > T31.3.1 + override : missing source operands
('bic r3, 3', [4, 0xE1C00000], -2306), # T31.0.2b > T31.3.1 + override : wrong source op 1
('and r12, r3, 3', [4, 0xE0000000], -2308), # > T31.3.1 + override : wrong op 2
('eor r3, #', [4, 0xE0200000], -1603), # > T31.3.1 + override : missing value after '#'
('sub r4, # ', [4, 0xE0400000], -1604), # > T31.3.1 + override : unexpected space after '#'
('rsb r5, #f', [4, 0xE0600000], -1605), # > T31.3.1 + override : unrecognizable info after '#'
('add r10, #0x102', [4, 0xE0800000], -1606), # > T31.3.1 + override : impossible fixup for odd rotations
('adc r11, #\'e\' c', [4, 0xE0A00000], -1607), # > T31.3.1 + override : unexpected text after imm val.
('sbc r10, r1,', [4, 0xE0C00000], -2204), # > T31.3.1 + override : missing shift register
('rsc r7, r2, lsl', [4, 0xE0E00000], -2205), # > T31.3.1 + override : missing space after shift mode
('orr r9, r4, asr x', [4, 0xE1800000], -2207), # > T31.3.1 + override : wrong info after shift mode
('bic r0, r8, ror #', [4, 0xE1C00000], -1703), # > T31.3.1 + override : missing value after '#'
('and r1, r9, lsl # ', [4, 0xE0000000], -1704), # > T31.3.1 + override : unexpected space after '#'
('eor r2, r10, lsr #f', [4, 0xE0200000], -1705), # > T31.3.1 + override : unrecognizable info after '#'
('sub r4, r12, ror #-20', [4, 0xE0400000], -1706), # > T31.3.1 + override : negative number of shifts
('rsb r12, #0b1002000', [4, 0xE0600000], -1002), # > T31.3.1 + override : invalid binary digit
('add r13, #012000900005', [4, 0xE0800000], -1003), # > T31.3.1 + override : invalid octal digit
('adc r14, #45d', [4, 0xE0A00000], -1004), # > T31.3.1 + override : invalid decimal digit
('sbc r15, #0x4X5', [4, 0xE0C00000], -1005), # > T31.3.1 + override : invalid hexa digit
('rsc r2, #\'\'', [4, 0xE0E00000], -1102), # > T31.3.1 + override : empty char
            ('orr r4, #\'\t\'', [4, 0xE1800000], -1103),        # > T31.3.1 + override : illegal character '\t'
('bic r1, #\' ', [4, 0xE1C00000], -1104), # > T31.3.1 + override : unclosed char
('and r3, #\' 0\'', [4, 0xE0000000], -1105), # > T31.3.1 + override : more than one character
            ('eors', [4, 0xE0200000], -3102),            # T31.0.2c > T31.1.2a error: missing data operands
            ('eoral', [4, 0xE0200000], -3102),           # T31.0.2c > T31.1.1a error: missing data operands
('tsts', [4, 0xE1100000], -3102), # T31.0.2c > T31.1.2a : missing operands
('tsts ', [4, 0xE1100000], -3102), # T31.0.2c > T31.1.2b > T31.3.0 : missing operands
('teqst', [4, 0xE1300000], -3105), # T31.0.2c > T31.1.2c error: wrong text after instruction
('cmpxx', [4, 0xE1500000], -3104), # T31.0.2c > T31.1.3 error: unknown instruction condition
('cmneq', [4, 0xE1700000], -3102), # T31.0.2c > T31.1.1a error: missing ops after pred.inst.
('movne ', [4, 0x11A00000], -3102), # T31.0.2c > T31.1.1b > T31.3.0 : idem after space
('mvncss', [4, 0x21E00000], -3102), # T31.0.2c > T31.1.1c > T31.2.1a : idem after set flag
('mvncsx', [4, 0x21E00000], -3105), # T31.0.2c > T31.1.1c > T31.2.2 : wrong text after pred.inst
('mvncssx', [4, 0x21E00000], -3105), # T31.0.2c > T31.1.1c > T31.2.1c : wrong text after pred.inst + flag
('andhss', [4, 0x20000000], -3102), # T31.0.2c > T31.1.1c > T31.2.1a : missing operands after set flag
('andhss ', [4, 0x20100000], -3102), # T31.0.2c > T31.1.1c > T31.2.1b > T31.3.0 : after set flag + space
('eorccx', [4, 0x30200000], -3105), # T31.0.2c > T31.1.1c > T31.2.2 : wrong text after pred.inst
('sublosx', [4, 0x30400000], -3105), # T31.0.2c > T31.1.1c > T31.2.1c : wrong text after pred.inst + flag
('cmp', [], -3102), # T31.0.2a error: missing operands after instr.
('cmn ', [4, 0xE1700000], -3102), # T31.0.2b > T31.3.0 error: missing operands after instr.
('mov 2,', [4, 0xE1A00000], -1302), # T31.0.2b > T31.3.1 + override : unrecognizable operand with ','
('mvn r', [4, 0xE1E00000], -1303), # T31.0.2b > T31.3.1 + override : missing register number
('tst r16', [4, 0xE1100000], -1304), # T31.0.2b > T31.3.1 + override : too high reg number
('teq r12', [4, 0xE1300000], -2302), # T31.0.2b > T31.3.1 + override : good dest reg, missing other ops
('cmp ', [4, 0xE1500000], -2303), # T31.0.2b > T31.3.1 + override : missing source 1 reg
('cmn r1,', [4, 0xE1700000], -2304), # T31.0.2b > T31.3.1 + override : missing source operands
('mov r2, ', [4, 0xE1A00000], -2306), # T31.0.2b > T31.3.1 + override : missing source operands
('mvn r3, 3', [4, 0xE1E00000], -2306), # T31.0.2b > T31.3.1 + override : wrong source op 1
('tst r3, #', [4, 0xE1100000], -1603), # > T31.3.1 + override : missing value after '#'
('teq r4, # ', [4, 0xE1300000], -1604), # > T31.3.1 + override : unexpected space after '#'
('cmp r5, #f', [4, 0xE1500000], -1605), # > T31.3.1 + override : unrecognizable info after '#'
('mov r10, #0x102', [4, 0xE1A00000], -1606), # > T31.3.1 + override : impossible fixup for odd rotations
('mvn r11, #\'e\' c', [4, 0xE1E00000], -1607), # > T31.3.1 + override : unexpected text after imm val.
('tst r7, r2, lsl', [4, 0xE1100000], -2205), # > T31.3.1 + override : missing space after shift mode
('teq r9, r4, asr x', [4, 0xE1300000], -2207), # > T31.3.1 + override : wrong info after shift mode
('cmp r0, r8, ror #', [4, 0xE1500000], -1703), # > T31.3.1 + override : missing value after '#'
('cmn r1, r9, lsl # ', [4, 0xE1700000], -1704), # > T31.3.1 + override : unexpected space after '#'
('mov r2, r10, lsr #f', [4, 0xE1A00000], -1705), # > T31.3.1 + override : unrecognizable info after '#'
('mvn r4, r12, ror #-20', [4, 0xE1E00000], -1706), # > T31.3.1 + override : negative number of shifts
('tst r12, #0b1002000', [4, 0xE1100000], -1002), # > T31.3.1 + override : invalid binary digit
('teq r13, #012000900005', [4, 0xE1300000], -1003), # > T31.3.1 + override : invalid octal digit
('cmp r14, #45d', [4, 0xE1500000], -1004), # > T31.3.1 + override : invalid decimal digit
('cmn r15, #0x4X5', [4, 0xE1700000], -1005), # > T31.3.1 + override : invalid hexa digit
('mov r2, #\'\'', [4, 0xE1A00000], -1102), # > T31.3.1 + override : empty char
            ('mvn r4, #\'\t\'', [4, 0xE1E00000], -1103),        # > T31.3.1 + override : illegal character '\t'
('tst r1, #\' ', [4, 0xE1100000], -1104), # > T31.3.1 + override : unclosed char
('teq r3, #\' 0\'', [4, 0xE1300000], -1105), # > T31.3.1 + override : more than one character
('eorsx', [4, 0xE0200000], -3105), # T31.0.2c > T31.1.2c error: wrong text after 's'
('eorx', [4, 0xE0200000], -3104), # T31.0.2c > T31.1.3 error: wrong text after inst.
('rsb r5, r10, #2', [4, 0xE26A5002], 1000), # T31.0.2b > T31.3.1 success: two regs, one immediate
('add r13, r8, lsl r12', [4, 0xE08DDC18], 1000), # T31.0.2b > T31.3.1 : LSL reg
('adc r14, sp, lsr r0 ', [4, 0xE0AEE03D], 1000), # T31.0.2b > T31.3.1 : LSR reg with trailing space
('sbc r15, r1,asr lr', [4, 0xE0CFFE51], 1000), # T31.0.2b > T31.3.1 : ASR reg no space after ','
('rsc r6, pc, lsr #0x1C ', [4, 0xE0E66E2F], 1000), # T31.0.2b > T31.3.1 : LSR imm with trailing space
('rsc r6, pc, lsr #0x0 ', [4, 0xE0E6600F], 1000), # : LSR #0 -> LSL #0
('orrs r7,r1,asr #0b10101', [4, 0xE1977AC1], 1000), # > T31.1.2b > T31.3.1:ASR bin imm, no space after ','
('orrs r7,r1,asr #0b0', [4, 0xE1977001], 1000), # : ASR #0 -> LSL #0
('bicmi r13, r7, r8, lsl r12 ', [4, 0x41C7DC18], 1000), # > T31.1.1b > T31.3.1 : three regs, shift reg
('andpls r14 , r8 , sp , lsr r10', [4, 0x5018EA3D], 1000), # > T31.1.1c > T31.2.1b > T31.3.1 : cond. + 's'
('eorvss r15,r9,#\'f\'', [4, 0x6239F066], 1000), # > T31.1.1c > T31.2.1b > T31.3.1 : cond.+'s'+ imm.
('subvc r9,#0xC0000034', [4, 0x724991D3], 1000), # T31.0.2c > T31.1.1b > T31.3.1 : one reg + one imm.
('rsbhis r8 , sp , lsr #10', [4, 0x8078852D], 1000), # > T31.1.1c > T31.2.1b > T31.3.1: reg + shifted reg
('addls r9,r1,asr r15', [4, 0x90899F51], 1000), # > T31.1.1b > T31.3.1 : idem with no 's'
('tst r7,r1, #0b10101', [4, 0xE1100000], -2310), # T31.0.2b > T31.3.1 + override : 3 ops with 'tst'
('teq r13,r7,r8,lsl r12', [4, 0xE1300000], -2310), # T31.0.2b > T31.3.1 + override : 3 ops with 'teq'
('cmppl r14,r8,sp,lsr r10', [4, 0x51500000], -2310), # T31.0.2b > T31.3.1 + override : 3 ops with 'cmp'
('cmnvss r15,r9,#\'f\'', [4, 0x61700000], -2310), # T31.0.2b > T31.3.1 + override : 3 ops with 'cmn'
('movvc r1,r9, #0xC000', [4, 0x71A00000], -2311), # T31.0.2b > T31.3.1 + override : 3 ops with 'mov'
            ('mvnhis r8, lr, sp, lsr pc', [4, 0x81F00000], -2311),  # > T31.3.1 + override : 3 ops with 'mvn'
('tst r7, #0b10101', [4, 0xE3170015], 1000), # T31.0.2b > T31.3.1 : 'tst' + reg + imm
('teqlss r7,r8,lsl r12', [4, 0x91370C18], 1000), # > T31.1.1c > T31.2.1b > T31.3.1: 'teq'+reg+shifted reg
('cmpge r14, r8', [4, 0xA15E0008], 1000), # > T31.1.1c > T31.3.1 : 'cmp' + reg + reg
('cmnlt r15, #\'f\'', [4, 0xB37F0066], 1000), # > T31.1.1c > T31.3.1 : 'cmn' + reg + char
('movgts r1, #0xC000', [4, 0xC3B01903], 1000), # > T31.1.1c > T31.2.1b > T31.3.1: 'mov' + reg + imm
('mvnle lr, sp, lsr #15', [4, 0xD1E0E7AD], 1000), # > T31.1.1c > T31.3.1 : 'mvn'+reg+shifted reg
('mov r2, #-1', [4, 0xE3E02000], 1000), # T31.0.2b > T31.3.1 : 'mov' + reg + NOT imm
('mvn r3, #0xFFF00FFF', [4, 0xE3A03AFF], 1000), # T31.0.2b > T31.3.1 : 'mvn' + reg + NOT imm
('and r4, #-200', [4, 0xE3C440C7], 1000), # T31.0.2b > T31.3.1 : 'and' + reg + NOT imm
('bic r5, #0xFFC03FFF', [4, 0xE20559FF], 1000), # T31.0.2b > T31.3.1 : 'bic' + reg + NOT imm
('add r6, #-300', [4, 0xE2466F4B], 1000), # T31.0.2b > T31.3.1 : 'add' + reg + NOT imm
            ('sub r7, #0xFF100000', [4, 0xE287760F], 1000),     # T31.0.2b > T31.3.1 : 'sub' + reg + NOT imm
('cmp r8, #-1000', [4, 0xE3780FFA], 1000), # T31.0.2b > T31.3.1 : 'cmp' + reg + NOT imm
('cmn r9, #0xFFC04000', [4, 0xE35909FF], 1000) # T31.0.2b > T31.3.1 : 'cmn' + reg + NOT imm
]
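# A full data-processing instruction adds condition, opcode and S flag on
# top of the operand fields:
# cond << 28 | I << 25 | opcode << 21 | S << 20 | Rn << 16 | Rd << 12 | op2.
# Sketch reproducing two idt_test vectors (tables and helper are ours):
DP_CONDS = {'eq': 0, 'ne': 1, 'cs': 2, 'cc': 3, 'mi': 4, 'pl': 5, 'vs': 6,
            'vc': 7, 'hi': 8, 'ls': 9, 'ge': 10, 'lt': 11, 'gt': 12,
            'le': 13, 'al': 14}
DP_OPCODES = {'and': 0, 'eor': 1, 'sub': 2, 'rsb': 3, 'add': 4, 'adc': 5,
              'sbc': 6, 'rsc': 7, 'tst': 8, 'teq': 9, 'cmp': 10, 'cmn': 11,
              'orr': 12, 'mov': 13, 'bic': 14, 'mvn': 15}

def dp_word(mnemonic, rd, rn, op2, cond='al', s=False, imm=False):
    """Assemble a 32-bit ARM data-processing instruction word."""
    return ((DP_CONDS[cond] << 28) | (0x02000000 if imm else 0)
            | (DP_OPCODES[mnemonic] << 21) | ((1 << 20) if s else 0)
            | (rn << 16) | (rd << 12) | op2)

assert dp_word('rsb', 5, 10, 0x002, imm=True) == 0xE26A5002           # 'rsb r5, r10, #2'
assert dp_word('and', 14, 8, 0xA3D, cond='pl', s=True) == 0x5018EA3D  # 'andpls r14, r8, sp, lsr r10'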
iml_test = [('', [], -3201), # T32.0.0 error: missing multiplication instr.
(' ', [], -3201), # T32.0.1 > T32.0.0 error: idem with leading space
('2', [], -3203), # T32.0.3 error: unrecognizable instruction
('mul', [], -3202), # T32.0.2a error: missing operands after instr.
('mla ', [4, 0xE0200090], -3202), # T32.0.2b > T32.3.0 error: missing operands after instr.
('umull 2,', [4, 0xE0800090], -1302), # T32.0.2b > T32.3.1b + override : unrecognizable operand with ','
('smull r', [4, 0xE0C00090], -1303), # T32.0.2b > T32.3.1b + override : missing register number
('umlal r16', [4, 0xE0A00090], -1304), # T32.0.2b > T32.3.1b + override : too high reg number
('smlal r12', [4, 0xE0E00090], -3202), # T32.0.2b > T32.3.1a error: good dest reg, missing other ops
('mul ', [4, 0xE0000090], -1301), # T32.0.2b > T32.3.1a + override : missing reg1
('mla r1,', [4, 0xE0210090], -3202), # T32.0.2b > T32.3.1b > T32.4.0 : missing source operands
('umull r2, ', [4, 0xE0802090], -1301), # > T32.4.1b + override : missing reg2
('smull r3, gu', [4, 0xE0C03090], -1302), # > T32.4.1b + override : wrong op 2
('umlal r12, r3, e3', [4, 0xE0A3C090], -1302), # > T32.5.1b + override : wrong op 3
('smlal r3, r4, r5, ', [4, 0xE0E43095], -1301), # > T32.6.1 + override : missing reg4
('mul r3, r4, r5, r6', [4, 0xE0030594], -3207), # > T32.6.1 + override : four regs with 'mul'
('mla r3, r4, r5', [4, 0xE0230594], -3202), # > T32.6.1 + override : three regs with 'mla'
('mul r3, r4, r5', [4, 0xE0030594], 1000), # > T32.5.1a success: three regs with 'mul'
('mla r3, r4, r5, r6', [4, 0xE0236594], 1000), # > T32.6.1 success: four regs with 'mla'
('umull r10, r11, r12, r13', [4, 0xE08BAD9C], 1000), # > T32.6.1 : four regs with 'umull'
('umlal r1, r11, r2, r3', [4, 0xE0AB1392], 1000), # > T32.6.1 : four regs with 'umlal'
('smull r10, r11, lr, r10', [4, 0xE0CBAA9E], 1000), # > T32.6.1 : four regs with 'smull'
('smlal sp, lr, r0, r7', [4, 0xE0EED790], 1000), # > T32.6.1 : four regs with 'smlal'
('mul pc, r0, r7', [4, 0xE0000090], -3208), # > T32.5.1a + override : use of PC as Rd
('mul r0, pc, r8', [4, 0xE0000090], -3208), # > T32.5.1a + override : use of PC as Rm
('mla r0, r7, pc', [4, 0xE0200097], -3208), # > T32.5.1a + override : use of PC as Rs
('umlal r10, pc, r6, r9', [4, 0xE0A0A090], -3208), # + override : use of PC as RdHi
('smlal pc, r9, r8, r7', [4, 0xE0E00090], -3208), # + override : use of PC as RdLo
('mul r3, r3, r5', [4, 0xE0030593], 1000), # + warning : Rd should be different from Rm
('mla r5, r5, r5, r1', [4, 0xE0251595], 1000), # + warning : Rd should be different from Rm
('mla r3, r4, r3, r4', [4, 0xE0234394], 1000), # success : should work
('mla r3, r4, r3, r3', [4, 0xE0233394], 1000), # success : should work
('umull r6, r7, r7, r6', [4, 0xE0876697], 1000), # + warning : RdHi, RdLo and Rm must all be different
('smull r9, r10, r9,r9', [4, 0xE0CA9999], 1000), # + warning : RdHi, RdLo and Rm must all be different
('umlal r6, r6, r7, r6', [4, 0xE0A66697], 1000), # + warning : RdHi and RdLo must be different
('smlal r8, r9, r10,r8', [4, 0xE0E9889A], 1000), # success : should work
('muleq', [4, 0xE0000090], -3202), # T32.0.2c > T32.1.1a error : cond & missing ops
('muls', [4, 0xE0000090], -3202), # T32.0.2c > T32.1.2a error : 's' & missing ops
('mulz', [4, 0xE0000090], -3204), # T32.0.2c > T32.1.3 error : wrong text after instr.
('muleqs', [4, 0x00000090], -3202), # > T32.1.1c > T32.2.1a error : missing ops
('muleqsz', [4, 0x00000090], -3205), # > T32.1.2b > T32.2.1c error : missing ops
('smull r3, r4', [4, 0xE0C03090], -3202), # > T32.4.1a error : missing ops
('smull r3, r4,', [4, 0xE0C43090], -3202), # > T32.5.0 error : missing ops
('smull r3, r4, r5', [4, 0xE0C43095], -3202), # > T32.5.1a error : missing ops
('smull r3, r4, r5,', [4, 0xE0C43095], -3202), # > T32.6.0 error : missing ops
('muleq r3, r4, r5', [4, 0x00030594], 1000), # T32.0.2c > T32.1.1b > success : 'mul' + cond
('mlanes r3, r4, r5, r6', [4, 0x10336594], 1000), # > T32.1.1c > T32.2.1b > : 'mla' + cond + 's'
('umulls r10, r11, r12, r13', [4, 0xE09BAD9C], 1000), # T32.0.2c > T32.1.2b > : 'umull' + 's'
('umlalle r1, r11, r2, r3', [4, 0xD0AB1392], 1000), # T32.0.2c > T32.1.1b > : 'umlal' + cond
('smulllex r10, r11, lr, r10', [4, 0xD0C00090], -3205), # T32.0.2c > T32.1.1c > T32.2.2 : error after cond
('smlalsy sp, lr, r0, r7', [4, 0xE0E00090], -3205) # T32.0.2c > T32.1.2c : error after 's'
]
ibr_test = [('', [], -3301), # T33.0.0 error: missing branch instr.
(' ', [], -3301), # T33.0.1 > T33.0.0 error: idem with leading space
('2', [], -3303), # T33.0.5 error: unrecognizable instruction
('blo', [], -3302), # T33.0.2a error: missing offset after instr.
('bleq ', [4, 0x0B000000], -3302), # T33.0.2b > T33.3.0 : missing offset after instr.
('blox', [4], -3303), # T33.0.2c error: unexpected text after instr.
('bx', [], -3304), # T33.0.3a error: missing reg after instr.
('blx ', [4, 0xE12FFF30], -3304), # T33.0.3b > T33.4.0 error: missing reg after instr.
('blxo', [4, 0xE12FFF30], -3303), # T33.0.3c > T33.2.2 error: unexpected text after instr.
('b', [], -3302), # T33.0.4a error: missing offset after instr.
('bl ', [4, 0xEB000000], -3302), # T33.0.4b > T33.3.0 error: missing offset after instr.
('bly', [4, 0xEB000000], -3303), # T33.0.4c > T33.1.2 error: unexpected text after instr.
('beq', [4, 0xEA000000], -3302), # T33.0.4c > T33.1.1a error: missing offset after instr.
('blne ', [4, 0x1B000000], -3302), # T33.0.4c > T33.1.1b > T33.3.0 : missing offset after instr.
('blnex', [4, 0x1B000000], -3303), # T33.0.4c > T33.1.1c : unexpected text after instr.
('bxeq', [4, 0xE12FFF10], -3302), # T33.0.3c > T33.2.1a error: missing offset after instr.
('blxeq ', [4, 0x012FFF30], -3304), # T33.0.3c > T33.2.1b > T33.4.0 : missing offset after instr.
('blxeqx', [4, 0x012FFF30], -3303), # T33.0.3c > T33.2.1c : unexpected text after instr.
('blt f', [4, 0xBA000000], -3305), # T33.0.2b > T33.3.2 error: wrong offset
('bls 0b12', [4, 0x9A000000], -1002), # T33.0.2b > T33.3.1 + override : unexpected binary digit
('blls 0192', [4, 0x9B000000], -1003), # > T33.1.1b > T33.3.1 + override: unexpected octal digit
('bllo -192a', [4, 0x3B000000], -1004), # > T33.1.1b > T33.3.1 + override: unexpected decimal digit
('blvc 0xA3G0', [4, 0x7B000000], -1005), # > T33.1.1b > T33.3.1 + override: unexpected hexa digit
('bvc 0xA30000000', [4, 0x7A000000], -1006), # > T33.3.1 + override: too long hex address
('bxvc 0xA300', [4, 0x712FFF10], -1302), # > T33.2.1b > T33.4.1 + override: unrecognized reg
('blxcc r', [4, 0x312FFF30], -1303), # > T33.2.1b > T33.4.1 + override: missing reg number
('bxcc rf', [4, 0x312FFF10], -1304), # > T33.2.1b > T33.4.1 + override: wrong reg number
('bxmi r16', [4, 0x412FFF10], -1304), # > T33.2.1b > T33.4.1 + override: wrong reg number
('bx r6', [4, 0xE12FFF16], 1000), # T33.0.3b > T33.4.1 success: 'bx' jump
('blxpl r6', [4, 0x512FFF36], 1000), # > T33.2.1b > T33.4.1 success: 'blx' jump
('blxlt r15', [4, 0xB12FFF3F], 1000), # > T33.2.1b > T33.4.1 warning: use of pc (r15)
('b 0xA300', [4, 0xEA0028C0], 1000), # T33.0.4b > T33.3.1 success: 'b' jump
('bl -1300', [4, 0xEBFFFEBB], 1000), # T33.0.4b > T33.3.1 success: 'bl' negative jump
('blt 073000000', [4, 0xBA3B0000], 1000), # > T33.3.1 success: 'blt' octal jump
('bleq 0x730000', [4, 0x0B1CC000], 1000), # > T33.3.1 success: 'bleq' hexa jump
('bhi 0xA30000', [4, 0x8A28C000], 1000), # > T33.3.1 success: 'bhi' jump
('blgt 0x1302', [4, 0xCB000000], -3307), # > T33.3.1 + override : misaligned address
('bllt 0x73000000', [4, 0xBB000000], -3308), # > T33.3.1 + override : out of range offset
('blal -73000000', [4, 0xEB000000], -3308), # > T33.3.1 + override : out of range negative offset
('bal -7300001', [4, 0xEA000000], -3307) # > T33.3.1 + override : misaligned negative address
]
am2_test = [('', [], -2401), # T24.0.0 error: missing addressing mode
(' ', [], -2401), # T24.0.1 > T24.0.0 error: idem with leading space
('2', [], -2402), # T24.0.3 error: missing '['
('[', [], -2403), # T24.0.2 > T24.1.0 error: missing info after '['
('[2', [], -2403), # T24.0.2 > T24.1.2 : unrecognizable register
('[r', [], -1303), # T24.0.2 > T24.1.1a + override : missing register number
('[ra', [], -1304), # T24.0.2 > T24.1.1a + override : wrong reg number
('[r16', [], -1304), # T24.0.2 > T24.1.1a + override : too high reg number
('[r12', [], -2404), # T24.0.2 > T24.1.1a error: good base reg, missing closure
('[r0 ', [], -2404), # T24.0.2 > T24.1.1a error: missing ',' after base reg
('[r1,', [0x01810000], -2405), # T24.0.2 > T24.1.1b > T24.2.0 : missing displacement
('[r2]!', [0x01820000], -2410), # T24.0.2 > T24.1.1c > T24.7.2 : unexpected text after ']'
('[r3, 3', [0x01830000], -2406), # > T24.1.1b > T24.2.1 > T24.2.6 : wrong displacement
('[r4, ra', [0x01840000], -1304), # > T24.2.1 > T24.2.5a + override: wrong reg number
('[r5, r1a', [0x01850000], -1304), # > T24.2.1 > T24.2.5a + override: wrong reg number
('[r6, +r1', [0x01860000], -2404), # > T24.2.1 > T24.2.2 > T24.3.1a : check positive reg displ.
('[r7, -r6', [0x01070000], -2404), # > T24.2.1 > T24.2.3 > T24.3.1a : check negative reg displ.
('[r8, -', [0x01080000], -2405), # > T24.2.3 > T24.3.0 : EOSeq after '-'
('[r8, -3.2', [0x01080000], -2406), # > T24.2.3 > T24.3.2 : wrong reg after '-'
('[r5, r10, ', [0x0385000A], -2407), # > T24.2.5b > T24.5.1 > T24.5.0 : missing shift mode
('[r7, r2, lsl', [0x03870002], -2408), # > T24.2.5b > T24.5.1 > T24.5.2a: missing space after shift
('[r8, r3, lsr ', [0x03880003], -2408), # > T24.5.2b > T24.6.0 : missing info after shift mode
('[r10, r5, ror r', [0x038A0005], -1702), # > T24.5.2b > T24.6.2 : idem
('[r1, r9, lsl # ', [0x03810009], -1704), # > T24.5.2b > T24.6.1a + override : unexpected space after '#'
('[r3, r11, asr #2', [0x0383000B], -2404), # > T24.5.2b > T24.6.1a : valid scaled reg, missing ']'
('[r8, #', [0x01880000], -2405), # > T24.2.1 > T24.2.4 > T24.4.0 : missing displacement
('[r4, # ', [0x01840000], -2406), # > T24.2.1 > T24.2.4 > T24.4.2 : unexpected space after '#'
('[r5, #\'f\'', [0x01850000], -2406), # > T24.2.1 > T24.2.4 > T24.4.2 : unrecognizable info after '#'
('[r6, #20', [0x01860000], -2404), # > T24.2.1 > T24.2.4 > T24.4.1a : base + imm. displ., missing ']'
('[r8, #-20', [0x01880000], -2404), # > T24.2.1 > T24.2.4 > T24.4.1a : idem for negative imm. displ.
('[r9,#0xC0000034]', [0x1890000], -2411), # > T24.4.1b + override : too long immediate displacement
('[r12, #0b1002000]', [0x018C0000], -1002), # + override : invalid binary digit
('[r13, #012000900005]', [0x018D0000], -1003), # + override : invalid octal digit
('[r14, #45d]', [0x018E0000], -1004), # + override : invalid decimal digit
('[r15, #0x4X5]', [0x018F0000], -1005), # + override : invalid hexa digit
('[ r6, #+0]', [0x01860000], 1000), # > T24.2.4 > T24.4.1b > T24.7.0 : success base + imm. displ.
('[r6, #20]', [0x01860014], 1000), # > T24.2.4 > T24.4.1b > T24.7.0 : success base + imm. displ.
('[r7, #+4095]', [0x01870FFF], 1000), # > T24.2.4 > T24.4.1b > T24.7.0 : maximum positive imm. displ.
('[r8, #-20]', [0x01080014], 1000), # > T24.2.4 > T24.4.1b > T24.7.0 : base + negative imm. displ.
('[r9, #-4095]', [0x01090FFF], 1000), # > T24.2.4 > T24.4.1b > T24.7.0 : minimum negative imm. displ.
('[r10]', [0x018A0000], 1000), # T24.0.2 > T24.1.1c > T24.7.0 : success base only
('[sp ]', [0x018D0000], 1000), # T24.0.2 > T24.1.1c > T24.7.0 : idem with trailing space
('[r9,r1]', [0x03890001], 1000), # > T24.1.1b > T24.2.5c > T24.7.0: success base + reg. displacement
('[ sp , lr ]', [0x038D000E], 1000), # > T24.1.1b > T24.2.5c > T24.7.0: idem with extra spaces
('[r1, +r6]', [0x03810006], 1000), # > T24.2.2 > T24.3.1c > T24.7.0 : check positive reg displ.
('[r6, -r7]', [0x03060007], 1000), # > T24.2.3 > T24.3.1c > T24.7.0 : check negative reg displ.
('[r5, r15]', [0x01850000], -2412), # > T24.2.5b + override : PC not allowed as Rm
('[r5, r10, ]', [0x0385000A], -2409), # > T24.2.5b > T24.5.1 > T24.5.3 : missing shift mode
('[r5, r10, lslx]', [0x0385000A], -2409), # > T24.2.5b > T24.5.1 > T24.5.3 : wrong shift mode
('[r7, +r2, lsl]', [0x03870002], -2409), # > T24.3.1b > T24.5.1 > T24.5.2c : missing space after shift
('[r8, -r3, lsr ]', [0x03080003], -2409), # > T24.3.1b > T24.6.2 : missing info after shift mode
('[r9, r4, asr x]', [0x03890004], -1702), # > T24.5.2b > T24.6.2 : wrong info after shift mode
('[r0, r8, ror #]', [0x03800008], -1703), # > T24.5.2b > T24.6.1a + override : missing value after '#'
('[r2, r10, lsr #f]', [0x0382000A], -1705), # > T24.5.2b > T24.6.1a + override : unrecogn. info after '#'
('[r4, r12, ror #-20]', [0x0384000C], -1706), # > T24.6.1b + override : negative number of shifts
('[r5, r13, lsl #040]', [0x0385000D], -1706), # > T24.6.1b + override : too high number of shifts
('[r5, r13, lsl #0]', [0x0385000D], 1000), # > T24.6.1b > T24.7.0 : true LSL #0
('[r6, lr, lsr #0x1C] ', [0x03860E2E], 1000), # > T24.6.1b > T24.7.1> T24.7.0: success with trailing space
('[r5, r13, lsl #00]', [0x0385000D], 1000), # > T24.6.1b > T24.7.0 : true LSL #0
('[r6, sp, lsr #0x0 ]', [0x0386000D], 1000), # > T24.6.1b > T24.7.0 : converting LSR #0 into LSL #0
('[r7,-r1,asr #0b10101]', [0x03070AC1], 1000), # : ASR bin imm, no space after ','
('[r7,+r1,asr #0b0]', [0x03870001], 1000), # : converting ASR #0 into LSL #0
('[r9, r12, ror #0x1F]', [0x03890FEC], 1000), # : success ROR with 31 shifts
('[r9, r12, ror #0x0]', [0x0389006C], 1000) # : coding ROR #0 as RRX
]
am3_test = [('', [], -2501), # T25.0.0 error: missing addressing mode
(' ', [], -2501), # T25.0.1 > T25.0.0 error: idem with leading space
('2', [], -2502), # T25.0.3 error: missing '['
('[', [], -2503), # T25.0.2 > T25.1.0 error: missing info after '['
('[2', [], -2503), # T25.0.2 > T25.1.2 : unrecognizable register
('[r', [], -1303), # T25.0.2 > T25.1.1a + override : missing register number
('[ra', [], -1304), # T25.0.2 > T25.1.1a + override : wrong reg number
('[r16', [], -1304), # T25.0.2 > T25.1.1a + override : too high reg number
('[r12', [], -2504), # T25.0.2 > T25.1.1a error: good base reg, missing closure
('[r0+', [], -1304), # T25.0.2 > T25.1.1a + override : missing ',' after base reg
('[r1,', [0x01C10000], -2505), # T25.0.2 > T25.1.1b > T25.2.0 : missing displacement
('[r2]!', [0x01C20000], -2510), # T25.0.2 > T25.1.1c > T25.7.2 : unexpected text after ']'
('[r3, 3', [0x01C30000], -2506), # > T25.1.1b > T25.2.1 > T25.2.6 : wrong displacement
('[r4, ra', [0x01C40000], -1304), # > T25.2.1 > T25.2.5a + override: wrong reg number
('[r5, r1a', [0x01C50000], -1304), # > T25.2.1 > T25.2.5a + override: wrong reg number
('[r6, +r1', [0x01C60000], -2504), # > T25.2.1 > T25.2.2 > T25.3.1a : check positive reg displ.
('[r7, -r6', [0x01470000], -2504), # > T25.2.1 > T25.2.3 > T25.3.1a : check negative reg displ.
('[r8, -', [0x01480000], -2505), # > T25.2.3 > T25.3.0 : EOSeq after '-'
('[r8, -3.2', [0x01480000], -2506), # > T25.2.3 > T25.3.2 : wrong reg after '-'
('[r5, r10, ', [0x01C50000], -2513), # > T25.2.5b : scaled reg. displ. not allowed
('[r7, r2, lsl', [0x01C70000], -2513), # > T25.2.5b : idem
('[r8, #', [0x01C80000], -2505), # > T25.2.1 > T25.2.4 > T25.4.0 : missing displacement
('[r4, # ', [0x01C40000], -2506), # > T25.2.1 > T25.2.4 > T25.4.2 : unexpected space after '#'
('[r5, #\'f\'', [0x01C50000], -2506), # > T25.2.1 > T25.2.4 > T25.4.2 : unrecognizable info after '#'
('[r6, #20', [0x01C60000], -2504), # > T25.2.1 > T25.2.4 > T25.4.1a : base + imm. displ., missing ']'
('[r9, #0x134]', [0x1C90000], -2511), # > T25.4.1b + override : too long immediate displacement
('[r12, #0b0001103]', [0x01CC0000], -1002), # + override : invalid binary digit
('[r13, #012009005]', [0x01CD0000], -1003), # + override : invalid octal digit
('[r14, #4+5]', [0x01CE0000], -1004), # + override : invalid decimal digit
('[r15, #0xX45]', [0x01CF0000], -1005), # + override : invalid hexa digit
('[ r6, #+0]', [0x01C60000], 1000), # > T25.2.4 > T25.4.1b > T25.7.0 : success base + imm. displ.
('[r6 ,#195]', [0x01C60C03], 1000), # > T25.2.4 > T25.4.1b > T25.7.0 : success base + imm. displ.
(' [r7, #+255]', [0x01C70F0F], 1000), # > T25.2.4 > T25.4.1b > T25.7.0 : maximum positive imm. displ.
('[r8, # -80]', [0x01480500], 1000), # > T25.2.4 > T25.4.1b > T25.7.0 : base + negative imm. displ.
('[r9, #-255 ]', [0x01490F0F], 1000), # > T25.2.4 > T25.4.1b > T25.7.0 : minimum negative imm. displ.
('[r9,# - 25]', [0x01490109], 1000), # > T25.2.4 > T25.4.1b > T25.7.0 : negative with white spaces
('[r9, # + 25]', [0x01C90109], 1000), # > T25.2.4 > T25.4.1b > T25.7.0 : positive with white spaces
('[r10]', [0x01CA0000], 1000), # T25.0.2 > T25.1.1c > T25.7.0 : success base only
('[sp ]', [0x01CD0000], 1000), # T25.0.2 > T25.1.1c > T25.7.0 : idem with trailing space
('[r9,r1]', [0x01890001], 1000), # > T25.1.1b > T25.2.5c > T25.7.0: success base + reg. displacement
('[ sp , lr ]', [0x018D000E], 1000), # > T25.1.1b > T25.2.5c > T25.7.0: idem with extra spaces
('[r1, +r6]', [0x01810006], 1000), # > T25.2.2 > T25.3.1c > T25.7.0 : check positive reg displ.
('[r1, + r6]', [0x01810006], 1000), # > T25.2.2 > T25.3.1c > T25.7.0 : idem with white space
('[r6, -r7]', [0x01060007], 1000), # > T25.2.3 > T25.3.1c > T25.7.0 : check negative reg displ.
('[r6,- r7] ', [0x01060007], 1000), # > T25.3.1c > T25.7.1 > T25.7.0 : idem with white space
('[r5, r15]', [0x01C50000], -2512), # > T25.2.5b + override : PC not allowed as Rm
('[r5, r10+]', [0x01C50000], -1304), # > T25.2.5b + override : wrong text after reg. number
('[r5, +r10,]', [0x01C50000], -2513) # > T25.2.2 > T25.3.1b : scaled reg. displ. not allowed
]
im2_test = [('', [], -3401), # T34.0.0 error: missing memory transfer inst.
(' ', [], -3401), # T34.0.1 > T34.0.0 error: idem with leading space
('2', [], -3402), # T34.0.3 error: missing 'ld' or 'st'
('ld', [4, 0xE0000000], -3402), # T34.0.2 > T34.1.0 error: missing inst. continuation
('st ', [4, 0xE0000000], -3402), # T34.0.2 > T34.1.4 error: missing inst. continuation
('str', [4, 0xE0000000], -3403), # T34.0.2 > T34.1.1 > T34.2.0 : missing space after inst.
('ldr ', [4, 0xE4100000], -3405), # > T34.1.1 > T34.2.1 > T34.5.0 : missing destination register
('sts', [4, 0xE0000000], -3408), # T34.0.2 > T34.1.2 + override : 's' not allowed for store inst.
('ldx', [4, 0xE0000000], -3402), # T34.0.2 > T34.1.4 : unrecognized mem. transfer inst.
('ldrb', [4, 0xE0000000], -3403), # > T34.1.1 > T34.2.2 > T34.3.0 : missing space after inst.
('strb ', [4, 0xE4400000], -3405), # > T34.2.2 > T34.3.1 > T34.5.0 : missing destination register
('ldrby', [4, 0xE0000000], -3404), # > T34.2.2 > T34.3.2 : wrong text after inst.
('ldrb e', [4, 0xE4500000], -1302), # > T34.3.1 > T34.5.1a + override: unknown reg
('str r', [4, 0xE4000000], -1303), # > T34.2.1 > T34.5.1a + override: missing reg number
('ldr rb', [4, 0xE4100000], -1304), # > T34.2.1 > T34.5.1a + override: wrong reg number
('ldrb r1', [4, 0xE4500000], -3406), # > T34.2.1 > T34.5.1a error: missing ',' after dest. reg
('strb r2,', [4, 0xE4402000], -3407), # > T34.5.1b > T34.6.0 error: missing info after dest. reg
('streq', [4, 0x00000000], -3403), # > T34.2.3 > T34.4.0 : missing space after inst.
('ldrne ', [4, 0x14100000], -3405), # > T34.2.3 > T34.4.1 > T34.5.0 : missing destination register
('strles', [4, 0xD0000000], -3408), # > T34.2.3 > T34.4.4 + override : 's' not allowed for store inst.
('ldrlox', [4, 0x30000000], -3404), # > T34.2.3 > T34.4.5 : unrecognized mem. transfer inst.
('ldrmib', [4, 0x40000000], -3403), # > T34.2.3 > T34.4.2 > T34.3.0 : missing space after inst.
('strmib ', [4, 0x44400000], -3405), # > T34.4.2 > T34.3.1 > T34.5.0 : missing destination register
('ldrhsbx', [4, 0x20000000], -3404), # > T34.4.2 > T34.3.2 : wrong text after inst.
('ldrhsb r2, 2', [4, 0x24502000], -2402), # > T34.6.1 > T34.6.3 + override : missing '['
('strvcb r3, [', [4, 0x74403000], -2403), # > T34.6.3 + override : missing info after '['
('ldrge r4, [2', [4, 0xA4104000], -2403), # > T34.6.3 + override : unrecognizable register
('strltb r5,[r', [4, 0xB4405000], -1303), # > T34.6.3 + override : missing register number
('ldrvc r6, [r16', [4, 0x74106000], -1304), # + override : too high reg number
('ldr lr, [r12', [4, 0xE410E000], -2404), # + override : good base reg, missing closure
('str sp, [r0 ', [4, 0xE400D000], -2404), # + override : missing ',' after base reg
('ldrb r15, [r1,', [4, 0xE450F000], -2405), # + override : missing displacement
('strb pc, [r2]!', [4, 0xE440F000], -2410), # + override : unexpected text after ']'
('ldrvsb r4,[r3, 3', [4, 0x64504000], -2406), # + override : wrong displacement
('strge r5, [r5, r1a', [4, 0xA4005000], -1304), # + override : wrong reg number
('ldrltb r6, [r5, r10, ', [4, 0xB4506000], -2407), # + override : missing shift mode
('strlsb r7, [r7, r2, lsl', [4, 0x94407000], -2408), # + override : missing space after shift
('strgt r9, [r8, r3, lsr ', [4, 0xC4009000], -2408), # + override : missing info after shift mode
('ldr r11, [r10, r5, ror r', [4, 0xE410B000], -1702), # + override : idem
('ldrb r12, [r1, r9, lsl # ', [4, 0xE450C000], -1704), # + override : unexpected space after '#'
('strb r13,[r9,#0xC0000034]', [4, 0xE440D000], -2411), # + override : too long immediate displacement
('ldr r0, [r12, #0b1002000]', [4, 0xE4100000], -1002), # + override : invalid binary digit
('strhi r1, [r13, #018000005]', [4, 0x84001000], -1003), # + override : invalid octal digit
('strlob r2, [r14, #5d4]', [4, 0x34402000], -1004), # + override : invalid decimal digit
('ldrplb r3, [r15, #0x4r]', [4, 0x54503000], -1005), # + override : invalid hexa digit
('ldrb r3, [r15, #0x400000000]', [4, 0xE4503000], -1006), # + override : too big number
('ldrcsb r4, [ r6, #+0]', [4, 0x25D64000], 1000), # > T34.6.3 : success base + imm. displ.
('ldr r5, [r6, #20]', [4, 0xE5965014], 1000), # : success base + imm. displ.
('str r6,[r7, #+4095]', [4, 0xE5876FFF], 1000), # : maximum positive imm. displ.
('ldreqb r7, [r8, #-20]', [4, 0x05587014], 1000), # : base + negative imm. displ.
('strccb r8, [r9, #-4095] ', [4, 0x35498FFF], 1000), # : minimum negative imm. displ.
('ldr r9, [r10]', [4, 0xE59A9000], 1000), # : success base only
('str r10,[r9,+r1]', [4, 0xE789A001], 1000), # : success base + reg. displacement
('str r10, [r5, r15]', [4, 0xE400A000], -2412), # + override : PC not allowed as Rm
('strb r11, [r0, r8, ror #]', [4, 0xE440B000], -1703), # + override : missing value after '#'
('ldrle r12, [r2, r10, lsr #f]', [4, 0xD410C000], -1705), # + override : unrecogn. info after '#'
('strmib r13, [r4, r12, ror #-20]', [4, 0x4440D000], -1706), # + override : negative number of shifts
('ldrplb r14, [r5, r13, lsl #040]', [4, 0x5450E000], -1706), # + override : too high number of shifts
('ldrvs r15,[r6, lr, lsr #0x1C] ', [4, 0x6796FE2E], 1000), # : success with trailing space
('str r0, [r5, r13, lsl #00]', [4, 0xE785000D], 1000), # : true LSL #0
('ldr r1, [r6, sp, lsr #0x0 ]', [4, 0xE796100D], 1000), # : converting LSR #0 into LSL #0
('str r2, [r7,-r1,asr #0b10101]', [4, 0xE7072AC1], 1000), # : ASR bin imm, no space after ','
('ldr r3 ,[r7,+r1,asr #0b0]', [4, 0xE7973001], 1000), # : converting ASR #0 into LSL #0
('ldrb r4,[r9, r12, ror #0x1F]', [4, 0xE7D94FEC], 1000), # : success ROR with 31 shifts
('strb r5, [r9, r12, ror #0x0]', [4, 0xE7C9506C], 1000) # : coding ROR #0 as RRX
]
im3_test = [('lds', [4, 0xE0000000], -3404), # T34.0.2 > T34.1.2 > T34.8.0 error: wrong memory transfer inst.
('strz', [4, 0xE0000000], -3404), # T34.0.2 > T34.1.1 > T34.2.6 error: wrong memory transfer inst.
('strs', [4, 0xE0000000], -3408), # > T34.1.1 > T34.2.5 + override : 's' not allowed for store inst.
('strh', [4, 0xE00000B0], -3403), # > T34.1.1 > T34.2.4 > T34.9.0 error: missing space after inst.
('ldrs', [4, 0xE0000000], -3404), # > T34.1.1 > T34.2.5 > T34.10.0 : wrong memory transfer inst.
('ldrh ', [4, 0xE01000B0], -3405), # > T34.2.4 > T34.9.1 > T34.11.0 : missing destination reg
('ldrsb', [4, 0xE01000D0], -3403), # > T34.2.5 > T34.10.1 > T34.9.0 : missing space after inst.
('ldrsh', [4, 0xE01000F0], -3403), # > T34.2.5 > T34.10.1 > T34.9.0 : missing space after inst.
('ldrsi', [4, 0xE0000000], -3404), # > T34.2.5 > T34.10.2 : wrong memory transfer inst.
('ldrsb ', [4, 0xE01000D0], -3405), # > T34.10.1 > T34.9.1 > T34.11.0: missing destination reg
('ldrsb e', [4, 0xE01000D0], -1302), # > T34.11.1a + override : wrong text after inst.
('ldrsbt', [4, 0xE01000D0], -3404), # > T34.10.1 > T34.9.2 : wrong memory transfer inst.
('ldsb', [4, 0xE01000D0], -3403), # > T34.8.2 > T34.9.0 : missing space after inst.
('ldsh ', [4, 0xE01000F0], -3405), # > T34.8.2 > T34.9.1 > T34.11.0 : missing destination reg
('ldsu ', [4, 0xE0000000], -3404), # T34.0.2 > T34.1.2 > T34.8.3 : wrong memory transfer inst.
('strneh', [4, 0x100000B0], -3403), # > T34.2.3 > T34.4.3 > T34.9.0 : missing space after inst.
('ldscc', [4, 0x30000000], -3404), # > T34.1.2 > T34.8.1 > T34.10.0 : wrong memory transfer inst.
('ldreqs', [4, 0x00000000], -3404), # > T34.2.3 > T34.4.4 > T34.10.0 : wrong memory transfer inst.
('ldrlssb', [4, 0x901000D0], -3403), # > T34.4.4 > T34.10.1 > T34.9.0 : missing space after inst.
('ldshsb r2', [4, 0x201000D0], -3406), # > T34.9.1 > T34.11.1a error: missing ',' after destination reg
('ldrhsh r2,', [4, 0x201020B0], -3407), # > T34.11.1b > T34.12.0 : missing info after dest. reg
('strleh r10, r12', [4, 0xD000A0B0], -2502), # T34.11.1b > T34.12.1 + override : missing '['
('strlsh r10, [12', [4, 0x9000A0B0], -2503), # T34.11.1b > T34.12.1 + override : missing reg after '['
('strloh r8, [r12', [4, 0x300080B0], -2504), # T34.11.1b > T34.12.1 + override : missing closure
('streqh r9, [r1,', [4, 0x000090B0], -2505), # T34.11.1b > T34.12.1 + override : missing displacement
('ldsccb r1,[r2]!', [4, 0x301010D0], -2510), # T34.11.1b > T34.12.1 + override: unexpected text after ']'
('strh r2, [r3, 3', [4, 0xE00020B0], -2506), # + override : wrong displacement
('ldsvch r4, [r5, r1a', [4, 0x701040F0], -1304), # + override : wrong reg number
('ldrvssb r5, [r7, -r6', [4, 0x601050D0], -2504), # + override : check negative reg displ.
('strplh r9, [r5, r10, ', [4, 0x500090B0], -2513), # + override : scaled reg. displ. not allowed
('ldsmib r10, [r9, #0x134]', [4, 0x4010A0D0], -2511), # + override : too long immediate displacement
('ldrgtsb r11 , [ r6, #+0]', [4, 0xC1D6B0D0], 1000), # > T34.11.1b > T34.12.1 success: base + imm. displ.
('strh r12, [r6 ,#195]', [4, 0xE1C6CCB3], 1000), # : base + imm. displ.
('ldrlsh r3, [r10, #-180]', [4, 0x915A3BB4], 1000), # : base + negative imm. displ.
('ldsgeh r13, [r8, # -80]', [4, 0xA158D5F0], 1000), # : base + negative imm. displ.
('ldshsb r14,[r9, #-255 ]', [4, 0x2159EFDF], 1000), # : minimum negative imm. displ.
('strhih pc, [r10]', [4, 0x81CAF0B0], 1000), # : success base only
(' ldrgtsh lr, [ pc ]', [4, 0xC1DFE0F0], 1000), # : idem with leading space
('ldsvsb r10,[r9,r1]', [4, 0x6199A0D1], 1000), # : success base + reg. displacement
('ldrlssh r0, [ sp , lr ]', [4, 0x919D00FE], 1000), # : idem with extra spaces
('strleh r1, [r6, -r7]', [4, 0xD10610B7], 1000), # : check negative reg displ.
('ldsb r9, [r5, r15]', [4, 0xE01090D0], -2512) # + override : PC not allowed as Rm
]
imm_test = [('ldm', [4, 0xE0000000], -3404), # T34.0.2 > T34.1.3 > T34.13.0 error: wrong memory transfer inst.
('stmz', [4, 0xE0000000], -3404), # T34.0.2 > T34.1.3 > T34.13.3 error: wrong memory transfer inst.
('ldmia', [4, 0xE8900000], -3403), # > T34.13.2 > T34.15.0 : missing space after inst.
('stmdb ', [4, 0xE9000000], -3405), # > T34.15.1 > T34.16.0 : missing destination reg
('ldmibe', [4, 0xE9900000], -3404), # > T34.13.2 > T34.15.2 : wrong memory transfer inst.
('ldmib e', [4, 0xE9900000], -1302), # > T34.16.1a + override : wrong register
('stmne', [4, 0x10000000], -3404), # > T34.13.1 > T34.14.0 : wrong memory transfer inst.
('ldmccda', [4, 0x38100000], -3403), # > T34.14.1 > T34.15.0 : missing space after inst.
('ldmccde', [4, 0x30000000], -3404), # > T34.14.2 error: wrong memory transfer inst.
('ldmeqia r', [4, 0x08900000], -1303), # > T34.16.1a + override : missing reg number
('ldmhsfd r2', [4, 0x28900000], -3406), # > T34.16.1a error: missing ',' after destination reg
('ldmhsfa r2,', [4, 0x28120000], -3407), # > T34.16.1b > T34.18.0 : missing info after dest. reg
('stmhiea r2!', [4, 0x89020000], -3406), # > T34.16.1c > T34.17.0 : missing ',' after destination reg
('stmhiea r2!,', [4, 0x89220000], -3407), # > T34.17.2 > T34.18.0 : missing info after dest. reg
('stmea r2!d', [4, 0xE9020000], -3404), # > T34.17.3 error: wrong text after '!'
('stmccib r3,1', [4, 0x39830000], -1502), # > T34.18.1 + override : missing '{'
('ldmmied r4!, {', [4, 0x49B40000], -1503), # + override : missing registers
('ldmplia r5, {1', [4, 0x58950000], -1302), # + override : unknown register identifier
('stmneda r6! , {r', [4, 0x18260000], -1303), # > T34.17.1 + override : missing register number
('stmia r7,{ra', [4, 0xE8870000], -1304), # + override : wrong reg number
('ldmfd r8, {r0', [4, 0xE8980000], -1503), # + override : unclosed single register
('stmed r9, {r14,}', [4, 0xE9890000], -1504), # + override : missing register after ','
('ldmfd r13!, {r4-}', [4, 0xE8BD0000], -1403), # + override : missing second reg in range list
('ldmfd r13!, {r14, }', [4, 0xE8BD0000], -1504), # + override : missing register after ', '
('ldmeqda r10!, {r0}', [4, 0x083A0001], 1000), # > T34.18.1 success: single register
('ldmalib r11 , {r0-r5}', [4, 0xE99B003F], 1000), # : single range
('stmccdb r12!, {pc, r1-r2, sp-r12, r5}', [4, 0x392CB026], 1000), # : several ranges, with spaces
('stmea r13!, {r14,r8}', [4, 0xE92D4100], 1000), # : no space after ','
('ldmfd r13!, { r9 , r13 }', [4, 0xE8BD2200], 1000) # : extra spaces
]
iil_test = [('str r0, =', [4, 0xE4000000], -3409), # > T34.6.2 + override : 'str' cannot use '=' loading
('ldrb r0,=', [4, 0xE4500000], -3409), # > T34.6.2 + override : neither 'ldrb'
('ldrh r0,=', [4, 0xE01000B0], -2502), # > T34.12.1 + override error: nor 'ldrh'
('ldr r0, =', [4, 0xE4100000], -3410), # > T34.6.2 > T34.7.0 error: missing number for immediate load
('ldr r0, = ', [4, 0xE4100000], -3410), # > T34.7.1 > T34.7.0 : idem with trailing space
('ldr r0, =t', [4, 0xE4100000], -3410), # > T34.7.1 > T34.7.3 : idem with trailing rubbish
('ldr r1, =0b00130', [4, 0xE4101000], -1002), # > T34.7.2 + override: invalid binary digit
('ldr r2, =00180', [4, 0xE4102000], -1003), # + override: invalid octal digit
('ldr r3, = -18a', [4, 0xE4103000], -1004), # + override: invalid decimal digit
('ldr r4, =0x10GA', [4, 0xE4104000], -1005), # + override: invalid hexa digit
('ldr r5, =0x100000000', [4, 0xE4105000], -1006), # + override: too big number
('ldr r6, =+0', [4, 0xE59F6FF8, 0], 1000), # > T34.7.2 success: set a relative pc loading
('ldrhi r7, = 00317652', [4, 0x859F7FF8, 0x19FAA], 1000), # : octal number
('ldrlt lr, =-1000', [4, 0xB59FEFF8, -1000], 1000), # : negative number
('ldr pc, = 0x8000', [4, 0xE59FFFF8, 0x8000], 1000) # : hexa number (load PC)
]
imi_test = [('', [], -3501), # T35.0.0 error: missing miscellanea instruction
(' ', [], -3501), # T35.0.1 > T35.0.0 : idem with space
('ldr', [], -3503), # T35.0.4 error: unrecognizable instruction
('push', [], -3502), # T35.0.2a error: missing operands
(' clz', [], -3502), # T35.0.1 > T35.0.3a error: idem with leading space
('pop ', [4, 0xE8BD0000], -3502), # > T35.0.2b > T35.2.0 : idem with a trailing space
('clz ', [4, 0xE1600010], -3502), # > T35.0.3b > T35.4.0 : idem for 'clz'
('clz 2', [4, 0xE1600010], -1302), # > T35.4.1a + override : unrecognizable register
('clz r', [4, 0xE1600010], -1303), # > T35.4.1a + override : missing register number
('clz r16', [4, 0xE1600010], -1304), # > T35.4.1a + override : too high reg number
('push 1', [4, 0xE92D0000], -1502), # > T35.2.1 + override : missing '{'
('pop {', [4, 0xE8BD0000], -1503), # + override : missing registers
('pushne {1', [4, 0x192D0000], -1302), # + override : unknown register identifier
('pophs {r', [4, 0x28BD0000], -1303), # + override : missing register number
('pushhi {ra', [4, 0x892D0000], -1304), # + override : wrong reg number
('poplo {r0', [4, 0x38BD0000], -1503), # + override : unclosed single register
('pushge {r14,}', [4, 0xA92D0000], -1504), # + override : missing register after ','
('popcc {r4-}', [4, 0x38BD0000], -1403), # + override : missing second reg in range list
('pushvs {r14, }', [4, 0x692D0000], -1504), # + override : missing register after ', '
('pusheq', [4, 0xE92D0000], -3502), # T35.0.2c > T35.1.1a error: missing operands
('popcce', [4, 0x38BD0000], -3504), # T35.0.2c > T35.1.1c error: wrong text after inst.
('popce', [4, 0xE8BD0000], -3504), # T35.0.2c > T35.1.2 error: wrong text after inst.
('pushle ', [4, 0xD92D0000], -3502), # > T35.1.1b > T35.2.0 error: missing operands
('clzh', [4, 0xE1600010], -3504), # T35.0.3c > T35.3.2 error: wrong text after inst.
('clzhi', [4, 0xE1600010], -3502), # T35.0.3c > T35.3.1a error: missing operands
('clzhi ', [4, 0x81600010], -3502), # > T35.3.1b > T35.4.0 error: missing operands
('clzhii', [4, 0x81600010], -3504), # T35.0.3c > T35.3.1c error: wrong text after inst.
('clzhs r15,', [4, 0x2160F010], -3502), # > T35.4.1b > T35.5.0 : missing operands
('clzhs r15 z,', [4, 0x21600010], -1304), # > T35.4.1a + override : wrong reg
('clzhs r15, ', [4, 0x2160F010], -3505), # > T35.4.1c > T35.5.2 : wrong info after Rd
('clzls r15,r6', [4, 0x9160F016], 1000), # > T35.4.1b > T35.5.1 : success 'clz' + cond
('pushls {r14}', [4, 0x992D4000], 1000), # > T35.1.1b > T35.2.1 : success 'push' + cond
('pop {r0, r4-r10, r14}', [4, 0xE8BD47F1], 1000) # > T35.2.1 : success 'pop'
]
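# Editor's note (inferred from the cases below, not from arm_analyzer's
# source): the arm_analyzer tests prefix each source line with either an
# explicit hex load address ('0x8000 ...') or '>', which continues at the
# auto-incremented address left by the previous case, word-aligning it when
# a 4-byte instruction follows. Expected results therefore carry the
# resolved address first, then the encoded data items.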
data_arm = [('', [], -4001), # T40.0.0 error: missing initial hex address
('2', [], -4002), # T40.0.4 error: wrong initial address
('>', [], -4003), # T40.0.2a error: missing space after '>'
('>a', [], -4003), # T40.0.2c error: unexpected char after '>'
(' ', [], -4001), # T40.0.1 > T40.0.0 error: white leading space
('0x', [], -2002), # T40.0.3 + override : leading '0x', missing hex digits
(' 0x8001', [], -2003), # T40.0.1 > T40.0.3 + override : missing space after address
(' 0x8001 ', [0x8001], -4004), # T40.0.1 > T40.0.3 > T40.1.0 error: right address, missing info
('0x10002EF00 .byte 2', [], -2004), # T40.0.3 + override : long hex address (> 2^32)
('0x8000.f', [], -2003), # T40.0.3 + override : missing space after address
('0x8000 .f', [0x8000], -2104), # T40.0.3 > T40.1.1 + override : unknown data dir
('0x8024 .byte', [0x8024], -2102), # T40.0.3 > T40.1.1 + override : address & directive, missing val
('0x8000 .byte ', [0x8000], -2102), # T40.0.3 > T40.1.1 + override : missing data values
('0x8000 .byte2', [0x8000], -2103), # T40.0.3 > T40.1.1 + override : missing space after directive
('0x8024 .byte 23', [0x8024, [1, 23]], 1000), # T40.0.3 > T40.1.1 success: capture one byte
('> ', [0x8025], -4004), # T40.0.2b > T40.2.0 error: missing info after '>'
('> .byte 2', [0x8025, [1, 2]], 1000), # T40.0.2b > T40.2.1 success: .byte directive after '>'
('> .byte 3', [0x8026, [1, 3]], 1000), # T40.0.2b > T40.2.1 success: '>' after '>'
('> .byte 230', [0x8027, [1, 230]], 1000), # T40.0.2b > T40.2.1 success : '>' after .byte (1 value)
('0x802F .byte 23, 0xCB', [0x802F, [1, 23, 0xCB]], 1000), # T40.0.3 > T40.1.1 success: capture two bytes
('0x802F .byte \'e\' c', [0x802F], -2105), # T40.0.3 > T40.1.1 + override : wrong delimiter
('0x802F .byte \'e\', c', [0x802F], -2106), # T40.0.3 > T40.1.1 + override : unrecognizable info
('0x802F .byte 2000', [0x802F], -2107), # T40.0.3 > T40.1.1 + override : data >= 2**8
('0x901B .hword 2300, 0xCB0', [0x901B, [2, 2300, 0xCB0]], 1000), # T40.0.3 > T40.1.1 / misaligned h
(' > .hword 230', [0x9020, [2, 230]], 1000), # T40.0.2b > T40.2.1 '>' after .hword (2 values)
('0x901A .hword 2300, 0xCB0', [0x901A, [2, 2300, 0xCB0]], 1000), # T40.0.3 > T40.1.1 / aligned h
(' > .hword 320', [0x901E, [2, 320]], 1000), # T40.0.2b > T40.2.1 '>' after .hword (h aligned)
('0xCbf8 .word 230000, 0xCB000', [0xCBF8, [4, 230000, 0xCB000]], 1000), # T40.0.3 > T40.1.1 / aligned w
('0xCbf9 .word 230000, 0xCB000', [0xCBF9, [4, 230000, 0xCB000]], 1000), # / misaligned w (1)
('0xCbfa .word 230000, 0xCB000', [0xCBFA, [4, 230000, 0xCB000]], 1000), # / misaligned w (2)
('0xCbfb .word 230000, 0xCB000', [0xCBFB, [4, 230000, 0xCB000]], 1000), # / misaligned w (3)
('> .word 010', [0xCC04, [4, 8]], 1000), # T40.0.2b > T40.2.1 '>' after .word (2 values)
('0xa03c .ascii \'2\'', [0xA03C, [1, 50]], 1000), # T40.0.3 > T40.1.1 success: .ascii directive
('> .word 0x010', [0xA040, [4, 16]], 1000), # T40.0.2b > T40.2.1 '>' after .ascii (1 value)
('0xa03b .asciz \'2\', \"0xCB\"', [0xA03B, [1, 50, 0, 48, 120, 67, 66, 0]], 1000), # / two strings
('> .word 0b010', [0xA044, [4, 2]], 1000), # T40.0.2b > T40.2.1 '>' after .asciz (7 values)
('0xa03c .ascii \' ', [0xA03C], -1104), # T40.0.3 > T40.1.1 + override : unclosed char
('0xa03c .ascii \" ', [0xA03C], -1204), # : unclosed string
('0xa03c .asciz \' ', [0xA03C], -1104), # : unclosed char
('0xa03c .asciz \" ', [0xA03C], -1204), # : unclosed string
('0xa03c .ascii \'\'', [0xA03C], -1102), # : empty char
('0xa03c .ascii \"\"', [0xA03C], -1202), # : empty string
('0xa03c .asciz \'\'', [0xA03C], -1102), # : empty char
('0xa03c .asciz \"\"', [0xA03C], -1202), # : empty string
('0xc30a .ascii \'\t\'', [0xC30A], -1103), # : illegal character ''
('0xc30a .asciz \'\t\'', [0xC30A], -1103), # : idem with .asciz
('0xc30a .ascii \"\t\"', [0xC30A], -1203), # : illegal character ""
('0xc30a .asciz \" \t\"', [0xC30A], -1203), # : idem after valid char
('0x3000 .ascii \' t\'', [0x3000], -1105), # : more than one character
('0x3000 .asciz \' t\'', [0x3000], -1105), # : idem with .asciz
('0x1000 .byte 0b012', [0x1000], -1002), # : unexpected binary digit
('0x2000 .hword 0408', [0x2000], -1003), # : unexpected octal digit
('0x2000 .hword 4oo8', [0x2000], -1004), # : unexpected decimal digit
('0x2000 .hword 408000', [0x2000], -2107), # : out of range dec. number
('0x2000 .hword -48000', [0x2000], -2107), # : out of range neg. number
('0x4000 .word 0x40x', [0x4000], -1005), # : unexpected hexa digit
('0x4000 .word 0x400000000', [0x4000], -1006), # : too long num. (>2^32 bits)
('0x4000 .word 0x4, 0x', [0x4000], -1005), # : unexpected hexa digit
('0xfffffffc .ascii \'0\'', [0xFFFFFFFC, [1, 48]], 1000), # almost in the address space limit
('> .word 0b1', [0x100000000, [4, 1]], -4006), # T40.0.2b > T40.2.1 error: crossing the address space limit
]
idat_arm = [('0x8000 2', [0x8000], -4005), # T40.0.3 > T40.1.7 error: unrecognizable instruction
('0x8004 and', [0x8004], -3102), # T40.0.3 > T40.1.2 + override : missing operands after instr.
('0x8008 eor ', [0x8008], -3102), # T40.0.3 > T40.1.2 + override : missing operands after instr.
('0x800C sub 20,', [0x800C], -1302), # : unrecognizable operand with ','
('0x8010 rsb r', [0x8010], -1303), # : missing register number
('0x8014 add r65', [0x8014], -1304), # : too high reg number
('0x8018 adc r12', [0x8018], -2302), # : good dest reg, missing other ops
('0x801C sbc ', [0x801C], -2303), # : missing dest reg
('0x8020 rsc r1,', [0x8020], -2304), # : missing source operands
('0x8024 orr r2, ', [0x8024], -2306), # : missing source operands
('0x8028 bic r3, gu', [0x8028], -2306), # : wrong source op 1
('0x802C and r12, r3, e3', [0x802C], -2308), # : wrong op 2
('0x8030 eor r3, #', [0x8030], -1603), # : missing value after '#'
('0x8034 sub r4, # ', [0x8034], -1604), # : unexpected space after '#'
('0x8038 rsb r5, #f', [0x8038], -1605), # : unrecognizable info after '#'
('0x803C add r10, #0x1002', [0x803C], -1606), # : impossible fixup for odd rotations
('0x8040 adc r11, #\'c\' 5', [0x8040], -1607), # : unexpected text after imm val.
('0x8044 sbc r10, r1,', [0x8044], -2204), # : missing shift register
('0x8048 rsc r7, r2, lsl', [0x8048], -2205), # : missing space after shift mode
('0x804C orr r9, r4, asr x', [0x804C], -2207), # : wrong info after shift mode
('0x8050 bic r0, r8, ror #', [0x8050], -1703), # : missing value after '#'
('0x8054 and r1, r9, lsl # ', [0x8054], -1704), # : unexpected space after '#'
('0x8058 eor r2, r10, lsr #f3', [0x8058], -1705), # : unrecognizable info after '#'
('0x805C sub r4, r12, ror #-2', [0x805C], -1706), # : negative number of shifts
('0x8060 orrs', [0x8060], -3102), # : missing data instruction operands
('0x8064 teqslo', [0x8064], -3105), # : wrong text after instruction
('0x8068 cmnlyy', [0x8068], -3104), # : unknown instruction condition
('0x8068 cmnls r0, #90', [0x8068, [4, 0x9370005A]], 1000), # T40.0.3 > T40.1.2 success: 1 reg, 1 imm.
('> rsbals r6, r11, #256', [0x806C, [4, 0xE27B6C01]], 1000), # T40.0.2b > T40.2.2 success: 2 regs, 1 imm.
('> addgt r12, r12, lsl r12', [0x8070, [4, 0xC08CCC1C]], 1000), # T40.0.2b > T40.2.2 : LSL reg
('0x8080 adcs r1, r2, lsr r0 ', [0x8080, [4, 0xE0B11032]], 1000), # T40.0.3 > T40.1.2 : LSR reg with space
('> rscles pc, lr, lsr #0x1F ', [0x8084, [4, 0xD0FFFFAE]], 1000), # T40.0.2b > T40.2.2 : LSR imm with space
('0x8088 bicmis r10, r11, r12, lsl r12', [0x8088, [4, 0x41DBAC1C]], 1000), # : three regs, shift reg
('0x8088 bicmis r0, r1, r2, lsl #0', [0x8088, [4, 0x41D10002]], 1000), # : three regs, LSL #0
('0x8088 bicmis r0, r1, r2, ror #0', [0x8088, [4, 0x41D10062]], 1000), # : three regs, ROR #0 -> RRX
('> tst r7,r1, #01010', [0x808C], -2310), # > T40.2.2 + override : 3 ops with 'tst'
('> movvc r1,r9, #0xC000', [0x808C], -2311), # > T40.2.2 + override : 3 ops with 'mov'
('> tst r7, #01010', [0x808C, [4, 0xE3170F82]], 1000), # T40.0.2b > T40.2.2 : 'tst' + reg + imm
('> teqlts r7,r8,lsl #12', [0x8090, [4, 0xB1370608]], 1000), # T40.0.2b > T40.2.2 : 'teq'+reg+shifted reg
('> mov r2, #-100', [0x8094, [4, 0xE3E02063]], 1000), # T40.0.2b > T40.2.2 : 'mov' + reg + NOT imm
('> and r4, #-250', [0x8098, [4, 0xE3C440F9]], 1000), # T40.0.2b > T40.2.2 : 'and' + reg + NOT imm
('> add r6, #-3120', [0x809C, [4, 0xE2466EC3]], 1000), # T40.0.2b > T40.2.2 : 'add' + reg + NOT imm
('0xA0008 cmp r8, #-1004', [0xA0008, [4, 0xE3780FFB]], 1000), # T40.0.3 > T40.1.2 : 'cmp' + reg + NOT imm
('> .byte -1', [0xA000C, [1, 255]], 1000), # T40.0.2b > T40.2.1 : automatic inc. +1
('> bics r5, #-255', [0xA0010, [4, 0xE21550FE]], 1000), # T40.0.2b > T40.2.2 : adjust adr. 3 bytes
('> .hword -2', [0xA0014, [2, 65534]], 1000), # T40.0.2b > T40.2.1 : automatic inc. +2
('> movvss r9,#0xC0000', [0xA0018, [4, 0x63B09703]], 1000), # T40.0.2b > T40.2.2 : adjust adr. 2 bytes
(' > .byte -1, -2, -3', [0xA001C, [1, 255, 254, 253]], 1000), # T40.0.2b > T40.2.1 : automatic inc. +3
(' > cmnne r5, #-256', [0xA0020, [4, 0x13550C01]], 1000), # T40.0.2b > T40.2.2 : adjust adr. 1 byte
('> r5, #-256', [0xA0024], -4005), # T40.0.2b > T40.2.7 : unrecognized inst.
('0xA0025 cmp r9, #1004', [0xA0025, [4, 0xE3590FFB]], 1000), # warning : address misaligned 1 byte
('0xA0026 cmp r10, #1008', [0xA0026, [4, 0xE35A0E3F]], 1000), # warning : address misaligned 2 bytes
(' 0xA0027 cmp r11, #1012', [0xA0027, [4, 0xE35B0FFD]], 1000), # warning : address misaligned 3 bytes
('0x8068 .word -4', [0x8068, [4, 4294967292]], 1000) # final test: set auto-address as before the first
# test in this series that makes use of '>'
]
imul_arm = [('0x7FFC .word -4', [0x7FFC, [4, 4294967292]], 1000), # set auto-address as before the first use of '>'
('> ', [0x8000], -4005), # T40.0.2b > T40.2.7 error: unrecognizable instruction
('> 2', [0x8000], -4005), # T40.0.2b > T40.2.7 error: unrecognizable instruction
('> mul', [0x8000], -3202), # T40.0.2b > T40.2.3 + override : missing operands after instr.
('> mla ', [0x8000], -3202), # T40.0.2b > T40.2.3 + override : missing operands after instr.
('> umull 2', [0x8000], -1302), # : wrong register
('> umull 2,', [0x8000], -1302), # : wrong register with ','
('> umull r', [0x8000], -1303), # : missing register number
('> smull r65', [0x8000], -1304), # : too high reg number
('> umlal r12', [0x8000], -3202), # : missing other regs
('> mul ', [0x8000], -1301), # : missing other regs
('0x90FC mul r1,', [0x90FC], -3202), # : missing source operands
('> mla r2, ', [0x8000], -1301), # : missing source operands
('> smlal r3, gu', [0x8000], -1302), # : wrong reg2
('> umlal r12, r3, e3', [0x8000], -1302), # : wrong reg3
('> mul r3, r4, r5, r6', [0x8000], -3207), # : four registers with 'mul'
('> smlal r3, r4, r5, ', [0x8000], -1301), # : missing reg4
('> mla r3, r4, r5', [0x8000], -3202), # : three regs with 'mla'
('> mul r1, r10, r8', [0x8000, [4, 0xE001089A]], 1000), # success: three regs with 'mul'
('0xA000 mla r13, r14, r0, r0', [0xA000, [4, 0xE02D009E]], 1000), # success: four regs with 'mla'
('> umull sp, lr, r12, r13', [0xA004, [4, 0xE08EDD9C]], 1000), # success: four regs with 'umull'
('> mul r10, pc, r7', [0xA008], -3208), # + override: use of PC as Rm
('> smulllex r10, r11, lr, r10', [0xA008], -3205), # + override: error after cond
('> mulz', [0xA008], -3204) # + override: wrong text after
]
ijmp_arm = [('0x7FFC .word -4', [0x7FFC, [4, 4294967292]], 1000), # set auto-address as before the first use of '>'
('> blo', [0x8000], -3302), # T40.0.2b > T40.2.4 + override: missing offset
('0x9004 bleq ', [0x9004], -3302), # T40.0.3 > T40.1.4 + override : missing offset
('> blox', [0x8000], -4005), # T40.0.2b > T40.2.4 + override: unexpected text after inst
('0xA0000 bx', [0xA0000], -3304), # T40.0.3 > T40.1.4 + override : missing reg after instr.
('> blxo', [0x8000], -4005), # T40.0.2b > T40.2.4 + override: unexpected text after inst
('0x10 blt f', [0x10], -3305), # T40.0.3 > T40.1.4 + override : wrong offset
('> bls 0b12', [0x8000], -1002), # T40.0.3 > T40.1.4 + override : unexpected binary digit
('> blls 0192', [0x8000], -1003), # : unexpected octal digit
('> bllo -192a', [0x8000], -1004), # : unexpected decimal digit
('> blvc 0xA3G0', [0x8000], -1005), # : unexpected hexa digit
('> bvc 0xA30000000', [0x8000], -1006), # : too long hex address
('> bxvc 0xA300', [0x8000], -1302), # : unrecognized reg
('> blxcc r', [0x8000], -1303), # : missing reg number
('> bxcc rf', [0x8000], -1304), # : wrong reg number
('> bxmi r16', [0x8000], -1304), # : wrong reg number
('> blgt 0x1302', [0x8000], -3307), # : misaligned address
('> bllt 0x73000000', [0x8000], -3308), # : out of range offset
('> blal -73000000', [0x8000], -3308), # : out of range neg. offset
('> bal -7300001', [0x8000], -3307), # : misaligned negative address
('> bx r6 ', [0x8000, [4, 0xE12FFF16]], 1000), # T40.0.2b > T40.2.4 success: 'bx' jump
('> blxpl r6', [0x8004, [4, 0x512FFF36]], 1000), # : 'blx' jump
('0x7A0C blxlt r15', [0x7A0C, [4, 0xB12FFF3F]], 1000), # > T40.1.4 warning: use of pc (r15)
('> b 0xA300', [0x7A10, [4, 0xEA000A3A]], 1000), # > T40.2.4 success: 'b' jump
('0xFFF8 bl 1300', [0xFFF8, [4, 0xEBFFC145]], 1000), # > T40.1.4 success: 'bl' negative jump
('> blt 073000000', [0xFFFC, [4, 0xBA3ABFFF]], 1000), # > T40.2.4 success: 'blt' octal jump
('> bleq 0x730000', [0x10000, [4, 0x0B1C7FFE]], 1000), # > T40.2.4 success: 'bleq' hexa jump
('0x7FF8 bhi 0xA30000', [0x7FF8, [4, 0x8A28A000]], 1000), # > T40.1.4 success: 'bhi' jump
('> bge 0x2008000', [0x7FFC, [4, 0xAA7FFFFF]], 1000), # : forward jump limit
('0x2000000 blhs 0x8', [0x2000000, [4, 0x2B800000]], 1000), # : backward jump limit
('0x400000 blhs 0xC', [0x400000, [4, 0x2BF00001]], 1000), # : another backward jump
('0x4000 blhi 0x4000', [0x4000, [4, 0x8BFFFFFE]], 1000), # : jump onto same address
('0x4000 blhi 0x4008', [0x4000, [4, 0x8B000000]], 1000), # : jump onto advanced pc
('0x4001 blhi 0x4008', [0x4001, [4, 0x8BFFFFFF]], 1000) # : jump from misaligned adr.
]
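# Editor's note -- a worked branch-offset check (derived from the cases
# above): for '> b 0xA300' assembled at 0x7A10, the stored word offset is
# (0xA300 - (0x7A10 + 8)) / 4 = 0xA3A, matching the expected 0xEA000A3A.
# The standalone ibr_test cases, which run without an address context,
# instead encode target / 4 directly (e.g. 0xA300 / 4 = 0x28C0 in
# 0xEA0028C0).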
imem_arm = [('0x7FFC .word -4', [0x7FFC, [4, 4294967292]], 1000), # set auto-address as before the first use of '>'
('> ld', [0x8000], -4005), # T40.0.2b > T40.2.5 + override: missing inst. continuation
('> st ', [0x8000], -4005), # + override: missing inst. continuation
('> str', [0x8000], -3403), # + override: missing space after inst.
('> ldr ', [0x8000], -3405), # + override: missing destination register
('> sts', [0x8000], -3408), # + override: 's' not allowed for store inst.
('> ldx', [0x8000], -4005), # + override: unrecognized mem. transfer inst.
('> ldrby', [0x8000], -3404), # + override: wrong text after inst.
('> ldrb e', [0x8000], -1302), # + override: unknown reg
('> str r', [0x8000], -1303), # + override: missing reg number
('> ldr rb', [0x8000], -1304), # + override: wrong reg number
('> ldrb r1', [0x8000], -3406), # + override: missing ',' after dest. reg
('> strb r2,', [0x8000], -3407), # + override: missing info after dest. reg
('> ldrhsb r2, 2', [0x8000], -2402), # + override: missing '['
('> strvcb r3, [', [0x8000], -2403), # + override: missing info after '['
('> ldrge r4, [2', [0x8000], -2403), # + override: unrecognizable register
('> strltb r5,[r', [0x8000], -1303), # + override: missing register number
('> ldrvc r6, [r16', [0x8000], -1304), # + override: too high reg number
('> ldr lr, [r12', [0x8000], -2404), # + override: good base reg, missing closure
('> ldrb r15, [r1,', [0x8000], -2405), # + override: missing displacement
('> strb pc, [r2]!', [0x8000], -2410), # + override: unexpected text after ']'
('> ldrvsb r4,[r3, 3', [0x8000], -2406), # + override: wrong displacement
('> ldrltb r6, [r5, r10, ', [0x8000], -2407), # + override: missing shift mode
('> strlsb r7, [r7, r2, lsl', [0x8000], -2408), # + override: missing space after shift
('> ldr r11, [r10, r5, ror r', [0x8000], -1702), # + override: missing info after shift mode
('> ldrb r12, [r1, r9, lsl # ', [0x8000], -1704), # + override: unexpected space after '#'
('> strb r13,[r9,#0xC0000034]', [0x8000], -2411), # + override: too long immediate displacement
('> ldr r0, [r12, #0b1002000]', [0x8000], -1002), # + override: invalid binary digit
('> strhi r1, [r13, #018000005]', [0x8000], -1003), # + override: invalid octal digit
('> strlob r2, [r14, #5d4]', [0x8000], -1004), # + override: invalid decimal digit
('> ldrplb r3, [r15, #0x4r]', [0x8000], -1005), # + override: invalid hexa digit
('> ldrb r3, [r15, #0x400000000]', [0x8000], -1006), # + override: too big number
('> ldrcsb r4, [ r6, #+0]', [0x8000, [4, 0x25D64000]], 1000), # success: base + imm. displ.
('> ldr r5, [r6, #20]', [0x8004, [4, 0xE5965014]], 1000), # success: base + imm. displ.
('> str r6,[r7, #+4095]', [0x8008, [4, 0xE5876FFF]], 1000), # success: maximum positive imm. displ.
('> ldreqb r7, [r8, #-20]', [0x800C, [4, 0x05587014]], 1000), # success: base + negative imm. displ.
('> strccb r8, [r9, #-4095] ', [0x8010, [4, 0x35498FFF]], 1000), # : minimum negative imm. displ.
('> ldr r9, [r10]', [0x8014, [4, 0xE59A9000]], 1000), # : base only
('> str r10,[r9,+r1]', [0x8018, [4, 0xE789A001]], 1000), # : base + reg. displacement
('> str r10, [r5, r15]', [0x801C], -2412), # + override: PC not allowed as Rm
('> strb r11, [r0, r8, ror #]', [0x801C], -1703), # + override: missing value after '#'
('> ldrle r12, [r2, r10, lsr #f]', [0x801C], -1705), # + override: unrecogn. info after '#'
('> strmib r13, [r4, r12, ror #-20]', [0x801C], -1706), # + override: negative number of shifts
('> ldrplb r14, [r5, r13, lsl #040]', [0x801C], -1706), # + override: too high number of shifts
('> ldrvs r15,[r6, lr, lsr #0x1C] ', [0x801C, [4, 0x6796FE2E]], 1000), # success: with trailing space
('> str r0, [r5, r13, lsl #00]', [0x8020, [4, 0xE785000D]], 1000), # success: true LSL #0
('0x904A ldr r1, [r6, sp, lsr #0x0 ]', [0x904A, [4, 0xE796100D]], 1000), # : converting LSR #0 into LSL #0
('> str r2, [r7,-r1,asr #0b10101]', [0x9050, [4, 0xE7072AC1]], 1000), # : ASR bin imm, no space after ','
('0x8090 ldr r3 ,[r7,+r1,asr #0b0]', [0x8090, [4, 0xE7973001]], 1000), # : converting ASR #0 into LSL #0
('> ldrb r4,[r9, r12, ror #0x1F]', [0x8094, [4, 0xE7D94FEC]], 1000), # : success ROR with 31 shifts
('> strb r5, [r9, r12, ror #0x0]', [0x8098, [4, 0xE7C9506C]], 1000), # : coding ROR #0 as RRX
('> lds', [0x809C], -3404), # + override: wrong memory transfer inst.
('> strz', [0x809C], -3404), # + override: wrong memory transfer inst.
('> strs', [0x809C], -3408), # + override: 's' not allowed for store inst.
('> ldrsb e', [0x809C], -1302), # + override: wrong text after inst.
('> strleh r10, r12', [0x809C], -2502), # + override: missing '['
('> strlsh r10, [12', [0x809C], -2503), # + override: missing reg after '['
('> strloh r8, [r12', [0x809C], -2504), # + override: missing closure
('> streqh r9, [r1,', [0x809C], -2505), # + override: missing displacement
('> ldsccb r1,[r2]!', [0x809C], -2510), # + override: unexpected text after ']'
('> strh r2, [r3, 3', [0x809C], -2506), # + override: wrong displacement
('> strplh r9, [r5, r10, ', [0x809C], -2513), # + override: scaled reg. displ. not allowed
('> ldsmib r10, [r9, #0x134]', [0x809C], -2511), # + override: too long immediate displacement
('> ldsb r9, [r5, r15]', [0x809C], -2512), # + override: PC not allowed as Rm
('> ldrgtsb r11 , [ r6, #+0]', [0x809C, [4, 0xC1D6B0D0]], 1000), # success: base + imm. displ.
('0x20030 strh r12, [r6 ,#195]', [0x20030, [4, 0xE1C6CCB3]], 1000), # success: base + imm. displ.
('0x2000 ldrlsh r3, [r10, #-180]', [0x2000, [4, 0x915A3BB4]], 1000), # : base + negative imm. displ.
('> stmz', [0x2004], -3404), # + override: wrong memory transfer inst.
('> ldmia', [0x2004], -3403), # + override: missing space after inst.
('> stmdb ', [0x2004], -3405), # + override: missing destination reg
('> ldmhsfd r2', [0x2004], -3406), # + override: missing ',' after destination reg
('> ldmhsfa r2,', [0x2004], -3407), # + override: missing info after dest. reg
('> stmccib r3,1', [0x2004], -1502), # + override: missing '{'
('> ldmmied r4!, {', [0x2004], -1503), # + override: missing registers
('> stmed r9, {r14,}', [0x2004], -1504), # + override: missing register after ','
('> ldmfd r13!, {r4-}', [0x2004], -1403), # + override: missing second reg in range list
('0x70FC ldmalib r11 , {r0-r5}', [0x70FC, [4, 0xE99B003F]], 1000), # success: single range
('> stmccdb r12!, {pc, r1-r2, sp-r12, r5}', [0x7100, [4, 0x392CB026]], 1000), # : several ranges, with spaces
('> str r0, =', [0x7104], -3409), # + override: 'str' cannot use '=' loading
('> ldrh r0,=', [0x7104], -2502), # + override: nor 'ldrh'
('> ldr r0, =t', [0x7104], -3410), # + override: idem with trailing rubbish
('> ldr r5, =0x100000000', [0x7104], -1006), # + override: too big number
('> ldr r6, =+0', [0x8104, [4, 0], 0x7104, [4, 0xE59F6FF8]], 1000), # success: set a relative pc loading
('> ldrhi r7, = 00317652', [0x8108, [4, 0x19FAA], 0x7108, [4, 0x859F7FF8]], 1000), # : octal number
('0x801C ldrlt lr, =-1000', [0x901C, [4, 0xFFFFFC18], 0x801C, [4, 0xB59FEFF8]], 1000), # : negative number
('> ldr pc, = 0x8000', [0x9020, [4, 0x8000], 0x8020, [4, 0xE59FFFF8]], 1000), # : hexa num. (load PC)
('0x801A ldrgt lr, =0x1FF80', [0x901A, [4, 0x1FF80], 0x801A, [4, 0xC59FEFF8]], 1000), # : explicit misalign
('> ldr sp , =0x80000', [0x9020, [4, 0x80000], 0x8020, [4, 0xE59FDFF8]], 1000), # : implicit misalign
('0xfffffffc .ascii \'1\'', [0xFFFFFFFC, [1, 49]], 1000), # almost in the address space limit
('> ldr r0, =8', [0x100001000, [4, 8], 0x100000000, [4, 0xE59F0FF8]], -4006), # crossing addr. space limit
('0xffffeffc .ascii \'2\'', [0xFFFFEFFC, [1, 50]], 1000), # almost in the address space limit
('> ldr r2,=-8', [0x100000000, [4, 0xFFFFFFF8], 0xFFFFF000, [4, 0xE59F2FF8]], -4006) # crossing addr. limit
]
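# Editor's note -- a worked literal-pool check (derived from the cases
# above): 'ldr r6, =+0' at 0x7104 emits 0xE59F6FF8, i.e. ldr r6, [pc, #0xFF8];
# with pc = 0x7104 + 8, the literal lands at 0x710C + 0xFF8 = 0x8104, exactly
# where the expected results place the constant, so the pool sits 4 KiB after
# the instruction.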
imsc_arm = [('0x7FFC .word -4', [0x7FFC, [4, 4294967292]], 1000), # set auto-address as before the first use of '>'
('> push', [0x8000], -3502), # T40.0.2b > T40.2.6 + override : missing operands
('0x8000 clz 2', [0x8000], -1302), # T40.0.3 > T40.1.6 + override : unrecognizable register
('> clz r', [0x8000], -1303), # + override : missing register number
('> clz r16', [0x8000], -1304), # + override : too high reg number
('> push 1', [0x8000], -1502), # + override : missing '{'
('> pop {', [0x8000], -1503), # + override : missing registers
('> pushge {r14,}', [0x8000], -1504), # + override : missing register after ','
('> popcc {r4-}', [0x8000], -1403), # + override : missing second reg in range list
('0x9004 popcce', [0x9004], -3504), # + override : wrong text after inst.
('> clzhs r15, ', [0x8000], -3505), # + override : wrong info after Rd
('> clzls r15,r6', [0x8000, [4, 0x9160F016]], 1000), # success : 'clz' + cond
('0xA00 pushls {r14}', [0xA00, [4, 0x992D4000]], 1000), # success : 'push' + cond
('> pop {r0, r4-r10, r14}', [0xA04, [4, 0xE8BD47F1]], 1000) # success : 'pop'
]
test_groups = [(number_analyzer, hex_test, 'hexadecimal numbers'),
(number_analyzer, dec_test, 'decimal numbers'),
(number_analyzer, oct_test, 'octal numbers'),
(number_analyzer, bin_test, 'binary numbers'),
(char_analyzer, chr_test, 'single quoted chars'),
(string_analyzer, str_test, 'double quoted strings'),
(data_analyzer, dat_test, 'data directives'),
(address_analyzer, adr_test, 'hex addresses'),
(register_analyzer, reg_test, 'register identifiers'),
(regbit_analyzer, rbt_test, 'registers bit mask'),
(reglst_analyzer, rlt_test, 'registers list mask'),
(immediate_op_analyzer, imo_test, 'immediate operand'),
(immediate_sr_analyzer, ims_test, 'immediate shift register'),
(op2_analyzer, op2_test, 'second operand'),
(opdat_analyzer, opd_test, 'data instruction operands'),
(instdat_analyzer, idt_test, 'data instructions'),
(instmul_analyzer, iml_test, 'multiplication instructions'),
(instjmp_analyzer, ibr_test, 'branch instructions'),
(opldst2_analyzer, am2_test, 'addressing mode 2'),
(opldst3_analyzer, am3_test, 'addressing mode 3'),
(instmem_analyzer, im2_test, 'memory transfer instructions, addressing mode 2'),
(instmem_analyzer, im3_test, 'memory transfer instructions, addressing mode 3'),
(instmem_analyzer, imm_test, 'memory transfer instructions, multiple registers'),
(instmem_analyzer, iil_test, 'memory transfer instructions, immediate load'),
(instmsc_analyzer, imi_test, 'miscellanea instructions'),
(arm_analyzer, data_arm, 'arm data directives'),
(arm_analyzer, idat_arm, 'arm data instructions'),
(arm_analyzer, imul_arm, 'arm multiplication instructions'),
(arm_analyzer, ijmp_arm, 'arm branch instructions'),
(arm_analyzer, imem_arm, 'arm memory transfer instructions'),
(arm_analyzer, imsc_arm, 'arm miscellanea instructions')
]
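# Editor's note -- a minimal sketch of how these tables could be consumed.
# It is NOT part of the original suite, and it assumes each analyzer exposes
# an `analyze(text)` method returning a `(results, code)` pair matching the
# tuples above; if the real API differs (separate parse/status calls,
# different result shapes), adapt accordingly.
def run_test_groups(groups):
    """Run every (analyzer, tests, title) group and report mismatches."""
    failures = 0
    for analyzer, tests, title in groups:
        for source, expected_results, expected_code in tests:
            results, code = analyzer.analyze(source)  # assumed signature
            # Partial results are only compared when the table provides them;
            # an empty expected list just asserts the status code.
            if code != expected_code or (expected_results and
                                         results != expected_results):
                failures += 1
                print('FAIL [%s] %r -> (%r, %r); expected (%r, %r)'
                      % (title, source, results, code,
                         expected_results, expected_code))
    print('%d test(s) failed across %d groups' % (failures, len(groups)))

# Hypothetical invocation (commented out so importing this module stays
# side-effect free):
# run_test_groups(test_groups)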
| 92.899927 | 120 | 0.498529 | from num_analyzer import NumberAnalyzer
from string_analyzer import CharAnalyzer
from string_analyzer import StringAnalyzer
from data_analyzer import DataAnalyzer
from adr_analyzer import AddressAnalyzer
from reg_analyzer import RegisterAnalyzer
from reg_analyzer import RegisterBitsAnalyzer
from reg_analyzer import RegisterListAnalyzer
from imm_analyzer import ImmediateOpAnalyzer
from imm_analyzer import ImmediateRSAnalyzer
from op2_analyzer import Op2Analyzer
from opdat_analyzer import OpdatAnalyzer
from instdat_analyzer import InstdatAnalyzer
from instmul_analyzer import InstmulAnalyzer
from instjmp_analyzer import InstjmpAnalyzer
from opldst_analyzer import Opldst2Analyzer
from opldst_analyzer import Opldst3Analyzer
from instmem_analyzer import InstmemAnalyzer
from instmsc_analyzer import InstmscAnalyzer
from arm_analyzer import ArmAnalyzer
number_analyzer = NumberAnalyzer()
char_analyzer = CharAnalyzer()
string_analyzer = StringAnalyzer()
data_analyzer = DataAnalyzer()
address_analyzer = AddressAnalyzer()
register_analyzer = RegisterAnalyzer()
regbit_analyzer = RegisterBitsAnalyzer()
reglst_analyzer = RegisterListAnalyzer()
immediate_op_analyzer = ImmediateOpAnalyzer()
immediate_sr_analyzer = ImmediateRSAnalyzer()
op2_analyzer = Op2Analyzer()
opdat_analyzer = OpdatAnalyzer()
instdat_analyzer = InstdatAnalyzer()
instmul_analyzer = InstmulAnalyzer()
instjmp_analyzer = InstjmpAnalyzer()
opldst2_analyzer = Opldst2Analyzer()
opldst3_analyzer = Opldst3Analyzer()
instmem_analyzer = InstmemAnalyzer()
instmsc_analyzer = InstmscAnalyzer()
arm_analyzer = ArmAnalyzer()
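
# Fixture layout, inferred from the entries below: each case is a tuple
# (source_text, expected_values, expected_status).  A status of 1000 marks a
# successful parse; negative statuses are specific error codes, and
# expected_values keeps whatever was captured before the analyzer stopped,
# so it can be non-empty even for error cases.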
hex_test = [('', [], -1001), (' ', [], -1001), ('0x', [], -1005), (' 0x', [], -1005), ('0x1', [1], 1000), (' 0x1', [1], 1000), (' 0xA', [10], 1000), ('0x01', [1], 1000), (' 0x001', [1], 1000), ('0x10', [16], 1000), ('0x2864', [10340], 1000), ('0xF3AE', [62382], 1000), ('0xb14a', [45386], 1000), ('0xb14A', [45386], 1000), ('0xR124', [], -1005), ('0x51V4', [], -1005), ('0x514W', [], -1005), ('0x10002EF0', [268447472], 1000), ('0x10002EF00', [], -1006) ]
dec_test = [('0', [0], 1000), (' 0', [0], 1000), ('1', [1], 1000), (' 1', [1], 1000), ('-1', [-1], 1000), (' -1', [-1], 1000), ('10', [10], 1000), ('2864', [2864], 1000), ('-2864', [-2864], 1000), ('+2864', [2864], 1000), ('r12', [], -1001), ('5V6', [], -1004), ('514W', [], -1004), ('-', [], -1004), ('+', [], -1004), ('-r12', [], -1004), ('+r12', [], -1004), ('-5V6', [], -1004), ('4684474720', [], -1006), ('-2147483649', [], -1006) ]
oct_test = [('000', [0], 1000), (' 00', [0], 1000), ('01', [1], 1000), (' 01', [1], 1000), ('001', [1], 1000), ('010', [8], 1000), ('02764', [1524], 1000), ('02864', [], -1003), ('0r12', [], -1003), ('05V6', [], -1003), ('0514W', [], -1003), ('00r12', [], -1003), ('063710000000', [], -1006) ]
bin_test = [('0b', [], -1002), (' 0b', [], -1002), ('0b1', [1], 1000), (' 0b1', [1], 1000), (' 0b0', [0], 1000), ('0b01', [1], 1000), (' 0b001', [1], 1000), ('0b10', [2], 1000), ('0b0110', [6], 1000), ('0bR101', [], -1002), ('0b01V4', [], -1002), ('0b110W', [], -1002), ('0b0140', [], -1002), ('0b10000000000000001000000000000000', [2147516416], 1000), ('0b100000000000000010000000000000001', [], -1006) ]
chr_test = [('', [], -1101), ("'", [], -1101), # T11.0.2 > T11.1.0 error: open single quote, missing char
(' n\'', [], -1101), ("''", [], -1102), ("' ", [32], -1104), # T11.0.2 > T11.1.2 > T11.2.0 error: unclosed single quoted char
("' 0", [32], -1105), ("' '", [32], 1000), (" ' '", [32], 1000), ('" "', [], -1101), ('\'\"\'', [34], 1000), # T11.0.2 > T11.1.2 > T11.2.1 capture double quote as single char
('\'\n\'', [], -1103) # T11.0.2 > T11.1.3 illegal character in single quotes
]
str_test = [('', [], -1201), # T12.0.0 error: no double quote
("'", [], -1201), # T12.0.3 error: unexpected single quote
('"', [], -1201), # T12.0.2 > T12.1.0 error: open double quote, missing string
(' n\"', [], -1201), # T12.0.1 > T12.0.3 error: missing quote before characters
('""', [], -1202), # T12.0.2 > T12.1.1 error: empty double quotes
('" ', [32], -1204), # T12.0.2 > T12.1.2 > T12.2.0 error: unclosed double quotes
('" 0', [32, 48], -1204), # T12.0.2 > T12.1.2 > T12.2.2 > T12.2.0 / idem with two chars
('" "', [32], 1000), # T12.0.2 > T12.1.2 > T12.2.1 successful single-char string
(' " "', [32], 1000), # T12.0.1 > T12.0.2 > T12.1.2 > T12.2.1 / idem with leading space
('"0123456789"', [48, 49, 50, 51, 52, 53, 54, 55, 56, 57], 1000), # T12.0.2 > T12.1.2 > T12.2.2 > T12.2.1
('"abcdefghijklmnopqrstuvwxyz"', [97, 98, 99, 100, 101, 102, 103, # alphabetic digits
104, 105, 106, 107, 108, 109, 110, 111, 112,
113, 114, 115, 116, 117, 118, 119, 120, 121,
122], 1000), # lower case letters
('"ABCDEFGHIJKLMNOPQRSTUVWXYZ"', [65, 66, 67, 68, 69, 70, 71, 72,
73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
84, 85, 86, 87, 88, 89, 90], 1000), # upper case letters
('"! 40, 41, 42, 43, 44, 45, 46, 47, 58, 59, 60,
61, 62, 63, 64, 91, 92, 93, 94, 95, 96, 123,
124, 125, 126], 1000), # punctuation letters
('\"\'\"', [39], 1000), # T12.0.2 > T12.1.2 > T12.2.1 capture single quote as a string
('\"\n\"', [], -1203), # T12.0.2 > T12.1.3 illegal character after double quote
('\" \n\"', [32], -1203) # T12.0.2 > T12.1.2 > T12.2.2 > T12.2.3 idem after a valid char
]
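
# In the data-directive cases below the first element of expected_values is
# the operand size in bytes (1 for .byte/.ascii/.asciz, 2 for .hword, 4 for
# .word); the encoded values follow it.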
dat_test = [('', [], -2101), # T21.0.0 error: missing data directive
(' ', [], -2101), # T21.0.1 > T21.0.0 idem with leading space
('.', [], -2101), # T21.0.2 > T21.1.0 error: missing directive after '.'
('f', [], -2101), # T21.0.3 error: missing '.'
('.f', [], -2104), # T21.0.2 > T21.1.6 error: unknown data directive
('.byte', [], -2102), # T21.0.2 > T21.1.1a error: missing data values
('.byte ', [1], -2102), # T21.0.2 > T21.1.1b > T21.2.0 error: missing data values
('.byte2', [], -2103), # T21.0.2 > T21.1.1c error: missing space after directive
('.byte 2', [1, 2], 1000), # T21.0.2 > T21.1.1b > T21.2.1a success: get one byte
('.byte 20', [1, 20], 1000), # T21.0.2 > T21.1.1b > T21.2.1a idem with two digits
('.byte -20', [1, 236], 1000), # T21.0.2 > T21.1.1b > T21.2.1a idem with negative number
('.byte 2000', [1], -2107), # T21.0.2 > T21.1.1b > T21.2.1a + override data >= 2**8
('.byte -200', [1], -2107), # T21.0.2 > T21.1.1b > T21.2.1a + override data < -2**7
('.byte 45r', [1], -1004), # T21.0.2 > T21.1.1b > T21.2.1a + override unexpected decimal digit
('.byte 45,', [1, 45], -2102), # T21.0.2 > T21.1.1b > T21.2.1b > T21.2.0 error: missing data
            ('.byte 45, ', [1, 45], -2106),  # T21.0.2 > T21.1.1b > T21.2.1b > T21.2.3 unrecognizable info
('.byte 200, 0xF4', [1, 200, 244], 1000), # T21.0.2 > T21.1.1b > T21.2.1b > T21.2.1a get two bytes
('.byte \'2\'', [1, 50], 1000), # T21.0.2 > T21.1.1b > T21.2.2a success: get one char
('.byte \'2\', \'F\'', [1, 50, 70], 1000), # T21.0.2 > T21.1.1b > T21.2.2b > T21.2.2a get two chars
('.byte \'2\', 0123', [1, 50, 83], 1000), # T21.0.2 > T21.1.1b > T21.2.2b > T21.2.1a one char + one num.
('.byte \'2\' , 0123', [1, 50, 83], 1000), # T21.0.2 > T21.1.1b > T21.2.2b > T21.2.1a with extra space
('.byte \'2\', 0123 ', [1, 50, 83], 1000), # T21.0.2 > T21.1.1b > T21.2.2b > T21.2.1a with trailing space
('.byte 0b110, \'e\'', [1, 6, 101], 1000), # T21.0.2 > T21.1.1b > T21.2.1b > T21.2.2a one num. + one char
('.byte 0b110 , \'e\'', [1, 6, 101], 1000), # T21.0.2 > T21.1.1b > T21.2.1b > T21.2.2a with extra space
('.byte 0b110, \'e\' ', [1, 6, 101], 1000),
# T21.0.2 > T21.1.1b > T21.2.1b > T21.2.2a with trailing space
('.byte \'e\' c', [1], -2105), # T21.0.2 > T21.1.1b > T21.2.1b > T21.2.1c wrong delimiter
            ('.byte \'e\', c', [1, 101], -2106),  # T21.0.2 > T21.1.1b > T21.2.1b > T21.2.3 unrecognizable info
            ('.byte c', [1], -2106),  # T21.0.2 > T21.1.1b > T21.2.3 unrecognizable info
('.hword', [], -2102), # T21.0.2 > T21.1.2a error: missing data values
            ('.hword ', [2], -2102),  # T21.0.2 > T21.1.2b > T21.3.0 error: missing halfwords
('.hword2', [], -2103), # T21.0.2 > T21.1.2c error: missing space after directive
('.hword 2000', [2, 2000], 1000), # T21.0.2 > T21.1.2b > T21.3.1a success: capture a halfword
('.hword 2000, 0b0010', [2, 2000, 2], 1000), # T21.0.2 > T21.1.2b > T21.3.1b > T21.3.1a two halfwords
('.hword 02000, -1, 0xF00A', [2, 1024, 65535, 61450], 1000), # success: three halfwords
('.hword \'e\'', [2], -2106), # T21.0.2 > T21.1.2b > T21.3.2 unrecognizeable info
('.hword 045r', [2], -1003), # T21.0.2 > T21.1.2b > T21.3.1a + override unexpected hexa digit
('.hword 45,', [2, 45], -2102), # T21.0.2 > T21.1.2b > T21.3.1b > T21.3.0 error: missing data
('.hword 2 , -0123 ', [2, 2, 0xFF85], 1000), # T21.0.2 > T21.1.2b > T21.3.1b > T21.3.1a extra space
            ('.hword -45000', [2], -2107),  # T21.0.2 > T21.1.2b > T21.3.1a + override error: data < -2**15
('.word', [], -2102), # T21.0.2 > T21.1.3a error: missing data values
            ('.word ', [4], -2102),  # T21.0.2 > T21.1.3b > T21.4.0 error: missing words
('.wordh', [], -2103), # T21.0.2 > T21.1.3c error: missing space after directive
('.word 2000', [4, 2000], 1000), # T21.0.2 > T21.1.3b > T21.4.1a success: capture a word
('.word -2147483648, 0b0010', [4, 2147483648, 0b0010], 1000), # T21.0.2 > T21.1.3b > T21.4.1b > T21.4.1a
('.word 020000000, -1, 0x1F00A', [4, 0o20000000, 4294967295, 0x1F00A], 1000), # three words
('.word r45', [4], -2106), # T21.0.2 > T21.1.3b > T21.4.2 unrecognizeable info
('.word 0b45', [4], -1002), # T21.0.2 > T21.1.3b > T21.4.1a + override unexpected binary digit
('.word 0x4X5', [4], -1005), # T21.0.2 > T21.1.3b > T21.4.1a + override unexpected hexa digit
('.word 0x400000000', [4], -1006), # T21.0.2 > T21.1.3b > T21.4.1a + override too long value (>2^32)
('.word 45,', [4, 45], -2102), # T21.0.2 > T21.1.3b > T21.4.1b > T21.4.0 error: missing data
('.word 2 , -0123 ', [4, 2, 4294967173], 1000), # T21.0.2 > T21.1.3b > T21.4.1b > T21.4.1a
('.word 4294967295', [4, 4294967295], 1000), # T21.0.2 > T21.1.3b > T21.4.1a success: maximum int
('.ascii', [], -2102), # T21.0.2 > T21.1.4a error: missing string
('.asciz', [], -2102), # T21.0.2 > T21.1.5a error: missing string
('.ascii ', [1], -2102), # T21.0.2 > T21.1.4b > T21.5.0 : missing string
('.asciz ', [1], -2102), # T21.0.2 > T21.1.5b > T21.6.0 : missing string
('.ascii5', [], -2103), # T21.0.2 > T21.1.4c error: missing space after directive
('.asciz8', [], -2103), # T21.0.2 > T21.1.5c error: missing space after directive
('.ascii \' \'', [1, 32], 1000), # T21.0.2 > T21.1.4b > T21.5.1a success: get one char
('.asciz \' \'', [1, 32, 0], 1000), # T21.0.2 > T21.1.5b > T21.6.1a success: get one char + '\0'
('.ascii \'a\', \'b\' ,\'c\' , \'d\' ', [1, 97, 98, 99, 100], 1000), # > T21.5.1b > T21.5.1a
('.asciz \'a\', \'b\' ,\'c\' , \'d\' ', [1, 97, 0, 98, 0, 99, 0, 100, 0], 1000), # > T21.6.1b > T21.6.1a
('.ascii "0123456789"', [1, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57], 1000), # T21.0.2 > T21.1.4b > T21.5.2a
('.asciz "abcdef"', [1, 97, 98, 99, 100, 101, 102, 0], 1000), # T21.0.2 > T21.1.5b > T21.6.2a
('.ascii \"b\", \"a\"', [1, 98, 97], 1000), # T21.0.2 > T21.1.4b > T21.5.2b > T21.5.2a
('.asciz \"a\", \"b\"', [1, 97, 0, 98, 0], 1000), # T21.0.2 > T21.1.5b > T21.6.2b > T21.6.2a
('.ascii \"b\", \'a\'', [1, 98, 97], 1000), # T21.0.2 > T21.1.4b > T21.5.2b > T21.5.1a
('.asciz \'a\', \"b\"', [1, 97, 0, 98, 0], 1000), # T21.0.2 > T21.1.5b > T21.6.1b > T21.6.2a
('.ascii \' ', [1], -1104), # T21.0.2 > T21.1.4b > T21.5.1a + override unclosed char
('.ascii \" ', [1], -1204), ('.asciz \' ', [1], -1104), # T21.0.2 > T21.1.5b > T21.6.1a + override unclosed char
('.asciz \" ', [1], -1204), # T21.0.2 > T21.1.5b > T21.6.2a + override unclosed string
('.ascii \'\'', [1], -1102), # T21.0.2 > T21.1.4b > T21.5.1a + override empty char
('.ascii \"\"', [1], -1202), # T21.0.2 > T21.1.4b > T21.5.2a + override empty string
('.asciz \'\'', [1], -1102), # T21.0.2 > T21.1.5b > T21.6.1a + override empty char
('.asciz \"\"', [1], -1202), # T21.0.2 > T21.1.5b > T21.6.2a + override empty string
('.ascii \' 0\'', [1], -1105), # T21.0.2 > T21.1.4b > T21.5.2a + override more than one character
('.asciz \' 0\'', [1], -1105), # T21.0.2 > T21.1.5b > T21.6.2a + override idem after .ascii
('.ascii \'a\', \"bc , \'d\"', [1, 97, 98, 99, 32, 44, 32, 39, 100], 1000), # > T21.5.1b > T21.5.2a
('.asciz \',\', \",,\"', [1, 44, 0, 44, 44, 0], 1000), # T21.0.2 > T21.1.5b > T21.6.1a success capture ','
            ('.ascii \'\t\'', [1], -1103),  # T21.0.2 > T21.1.4b > T21.5.1c + override : illegal character '\t'
            ('.asciz \'\t\'', [1], -1103),  # T21.0.2 > T21.1.5b > T21.6.1c + override : idem after .ascii
            ('.ascii \"\t\"', [1], -1203),  # T21.0.2 > T21.1.4b > T21.5.2c + override : illegal character '\t' in ""
            ('.asciz \" \t\"', [1], -1203),  # T21.0.2 > T21.1.5b > T21.6.2c + override : idem after a valid char
('.ascii \'"\'a', [1], -2105), ('.ascii \"\'a\"b', [1], -2105), # T21.0.2 > T21.1.4b > T21.5.2c unexpected separator
('.asciz \'"\'a', [1], -2105), # T21.0.2 > T21.1.5b > T21.6.1c unexpected separator
('.asciz \"\'a\"b', [1], -2105), # T21.0.2 > T21.1.5b > T21.6.2c unexpected separator
('.ascii \' a\'', [1], -1105), # T21.0.2 > T21.1.4b > T21.5.2a + override more than one character
('.asciz \' a\'', [1], -1105), # T21.0.2 > T21.1.5b > T21.6.2a + override idem after .ascii
            ('.ascii a\'', [1], -2106),  # T21.0.2 > T21.1.4b > T21.5.3 unrecognizable info
            ('.asciz a\'', [1], -2106),  # T21.0.2 > T21.1.5b > T21.6.3 unrecognizable info
(' .asciz \'a\'', [1, 97, 0], 1000) # T21.0.1 > T21.0.2 > T21.1.5b > T21.6.1a success with leading space
]
adr_test = [('', [], -2001), # T20.0.0 error: missing address
            (' ', [], -2001),  # T20.0.1 > T20.0.0 idem with leading space
('0x', [], -2002), # T20.0.2 > T20.1.0 error: '0x' but missing hex digits
('x0', [], -2001), # T20.0.3 error: missing address start
(' 0x8001', [], -2003), # T20.0.1 > T20.0.2 > T20.1.1a address but missing trailing space
('0xF3AE ', [0xF3AE], 1000), # T20.0.2 > T20.0.2 > T20.1.1b success address with trailing space
('0xR124', [], -2003), # T20.0.2 > T20.1.2 illegal address (first digit)
('0x51V4', [], -2003), # T20.0.2 > T20.1.1c illegal address (in-the-middle)
('0x514W', [], -2003), # T20.0.2 > T20.1.1c illegal address (last one)
('0xF0002E00 ', [0xF0002E00], 1000), # T20.0.2 > T20.1.1b big hex address: eight digits
('0x10002EF00 ', [], -2004) # T20.0.2 > T20.1.1b + override long hex address (> 2^32)
]
reg_test = [('', [], -1301), # T13.0.0 error: missing register
(' ', [], -1301), # T13.0.1 > T13.0.0 / idem with leading space
('1', [], -1302), # T13.0.4 error: unknown register identifier
('r', [], -1303), # T13.0.2 > T13.1.0 error: missing register number
('ra', [], -1304), # T13.0.2 > T13.1.2 error: wrong reg number
('r1a', [], -1304), # T13.0.2 > T13.1.2 error: wrong reg number
('r-1', [], -1304), # T13.0.2 > T13.1.1 + override : negative reg number
('r16', [], -1304), # T13.0.2 > T13.1.1 + override : too high reg number
('r12', [12], 1000), # T13.0.2 > T13.1.1 success: two digit reg number
('r0', [0], 1000), # T13.0.2 > T13.1.1 success: one digit reg number
('sp', [13], 1000), # T13.0.3 success: stack pointer
('lr', [14], 1000), # T13.0.3 success: link register
('pc', [15], 1000) # T13.0.3 success: program counter
]
rbt_test = [('', [], -1401), # T14.0.0 error: missing register
(' ', [], -1401), # T14.0.1 > T14.0.0 / idem with leading space
('1', [], -1302), # T14.0.2c + override unknown register identifier
('r', [], -1303), # T14.0.2a + override missing register number
('ra', [], -1304), # T14.0.2a + override wrong reg number
('r1a', [], -1304), # T14.0.2c + override wrong reg number
('r-1', [], -1303), # T14.0.2b + override negative reg number
('r16', [], -1304), # T14.0.2a + override too high reg number
('r0', [0x1], 1000), # T14.0.2a success: single register
('r15', [0x8000], 1000), # T14.0.2a : maximum single reg value
('r0-r5', [0x3F], 1000), # T14.0.2b > T14.1.1 success: reg range (min, max)
('r12-r2', [0x1FFC], 1000), # T14.0.2b > T14.1.1 : (max, min)
('lr-pc', [0xC000], 1000), # T14.0.2b > T14.1.1 : (symbolic)
('sp-r12', [0x3000], 1000), # T14.0.2b > T14.1.1 : (symbolic & numeric, two bits)
('sp-r13', [0x2000], 1000), # T14.0.2b > T14.1.1 : (symbolic & numeric, one bit)
('r4-', [0x10], -1403), # T14.0.2b > T14.1.0 error: missing second reg in range list
('r8-1', [0x100], -1302), # T14.0.2a > T14.1.1 + override wrong second reg
('r9-r16', [0x200], -1304) # T14.0.2a > T14.1.1 + override too high second reg number
]
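
# A small sketch of the bit-mask semantics implied by rbt_test: register rN
# sets bit N, and a range sets every bit between its endpoints, whichever
# order they are written in ('r12-r2' covers the same bits as 'r2-r12').
def range_mask(first, last):
    lo, hi = min(first, last), max(first, last)
    mask = 0
    for n in range(lo, hi + 1):
        mask |= 1 << n
    return mask

assert range_mask(0, 5) == 0x3F       # 'r0-r5'
assert range_mask(12, 2) == 0x1FFC    # 'r12-r2'
assert range_mask(14, 15) == 0xC000   # 'lr-pc'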
rlt_test = [('', [], -1501), # T15.0.0 error: missing register list
(' ', [], -1501), # T15.0.1 > T15.0.0 : idem with leading space
('1', [], -1502), # T15.0.3 error: missing '{'
('{', [], -1503), # T15.0.2 > T15.1.0 error: missing registers
('{1', [], -1302), # T15.0.2 > T15.1.1a + override : unknown register identifier
('{r', [], -1303), # T15.0.2 > T15.1.1a + override : missing register number
('{ra', [], -1304), # T15.0.2 > T15.1.1a + override : wrong reg number
('{r1a', [], -1304), # T15.0.2 > T15.1.1a + override : wrong reg number
('{r-1', [], -1303), # T15.0.2 > T15.1.1a + override : negative reg number
('{r16', [], -1304), # T15.0.2 > T15.1.1a + override : too high reg number
('{r0', [], -1503), # T15.0.2 > T15.1.1a error: unclosed single register
('{r0}', [0x1], 1000), # T15.0.2 > T15.1.1c success: single register
('{r0-r5}', [0x3F], 1000), # T15.0.2 > T15.1.1c success: single range
('{r0-r5 }', [0x3F], 1000), # : idem with trailing space
('{r12-r2, lr', [0x1FFC], -1503), # > T15.1.1b > T15.1.1a error: missing '}' after list
('{r12 - r2, lr}', [0x5FFC], 1000), # > T15.1.1b > T15.1.1c success: range + single register
('{ pc, r1 -r2, sp- r12, r5}', [0xB026], 1000), # : several ranges, with spaces
('{r4-}', [], -1403), # > T15.1.1a + override : missing second reg in range list
('{r14, r8-1', [0x4000], -1302), # > T15.1.1a + override : wrong second reg
('{r9-r16, r13}', [], -1304), # > T15.1.1a + override : too high second reg number
('{r14,r8}', [0x4100], 1000), # success: no space after ','
('{ r9 , r13 }', [0x2200], 1000), # success: extra spaces
('{r14,}', [0x4000], -1504), # > T15.1.1b > T15.1.2 error: missing register after ','
('{r14, }', [0x4000], -1504), # > T15.1.1b > T15.1.2 : missing register after ', '
('{r9-r15, sp13}', [0xFE00], -1402) # > T15.1.1b + override : unrecognized register id
]
imo_test = [('', [], -1601), # T16.0.0 error: missing immediate value
(' ', [], -1601), # T16.0.1 > T16.0.0 idem with leading space
('2', [], -1602), # T16.0.3 error: missing '#'
('#', [], -1603), # T16.0.2 > T16.1.0 error: missing value after '#'
('# ', [], -1604), # T16.0.2 > T16.1.1 error: unexpected space after '#'
('#f', [], -1605), # T16.0.2 > T16.1.4 error: unrecognizable info after '#'
('#20', [20], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success: simple byte value
('#\'f\'', [102], 1000), # T16.0.2 > T16.1.3 > T16.2.0 success: simple char value
('#-20', [], -1606), # T16.0.2 > T16.1.2 + override : impossible fixup for negative number
('#2000', [0xE7D], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success fixup: in-the-middle bits
('#0xC0000034', [0x1D3], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success fixup: split bits
('#0xFF000000', [0x4FF], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success fixup: maximum rotation
('#0xFF0000FF', [], -1606), # T16.0.2 > T16.1.2 + override : impossible fixup for 16 bits
('#0x102', [], -1606), # T16.0.2 > T16.1.2 + override : impossible fixup for odd rotations
('#0x104', [0xF41], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success fixup: odd immediate mask
('#0x108', [0xF42], 1000), # T16.0.2 > T16.1.2 > T16.2.0 : even immediate mask
('#45r', [], -1004), # T16.0.2 > T16.1.2 + override : unexpected decimal digit
('#\'e\' c', [101], -1607), # T16.0.2 > T16.1.3 > T16.2.1 error: unexpected text after imm val.
('#0b111111100000000000', [0xBFE], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success fixup: binary
('#0b1002000', [], -1002), # T16.0.2 > T16.1.2 + override : invalid binary digit
('#012000000005', [0x255], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success fixup: octal
('#012000900005', [], -1003), # T16.0.2 > T16.1.2 + override : invalid octal digit
('#45d', [], -1004), # T16.0.2 > T16.1.2 + override : invalid decimal digit
('#0x4X5', [], -1005), # T16.0.2 > T16.1.2 + override : invalid hexa digit
('#0x400000000', [], -1006), # T16.0.2 > T16.1.2 + override : too long value (>2^32)
('#0x08000002', [0x382], 1000), # T16.0.2 > T16.1.2 > T16.2.0 success fixup: MSB = 1 at IM
('#\'', [], -1605), # T16.0.2 > T16.1.4 error: unclosed char
('#\' ', [], -1104), # T16.0.2 > T16.1.3 + override : unclosed char
('#\'\'', [], -1102), # T16.0.2 > T16.1.3 + override : empty char
('#\' 0\'', [], -1105), # T16.0.2 > T16.1.3 + override : more than one character
            ('#\'\t\'', [], -1103),  # T16.0.2 > T16.1.3 + override : illegal character '\t'
('#\"t\"', [], -1605), # T16.0.2 > T16.1.4 error: illegal character '"'
(' #\'a\'', [97], 1000) # T16.0.1 > T16.0.2 > T16.1.3 > T16.2.0 success with leading space
]
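
# Sketch of the immediate fix-up exercised above: an ARM data-processing
# immediate is an 8-bit value rotated right by twice a 4-bit count, encoded
# as (rot << 8) | imm8.  Values with no such representation are rejected,
# which is what error -1606 stands for in these cases.
def encode_immediate(value):
    value &= 0xFFFFFFFF
    for rot in range(16):
        # rotating left by 2*rot undoes the encoding's rotate-right
        imm8 = ((value << (2 * rot)) | (value >> (32 - 2 * rot))) & 0xFFFFFFFF
        if imm8 < 0x100:
            return (rot << 8) | imm8
    return None

assert encode_immediate(2000) == 0xE7D        # '#2000'
assert encode_immediate(0xC0000034) == 0x1D3  # '#0xC0000034'
assert encode_immediate(0x102) is None        # odd rotation -> impossible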
ims_test = [('', [], -1701),  # T17.0.0 error: missing immediate value
            (' ', [], -1701),  # T17.0.1 > T17.0.0 idem with leading space
            ('2', [], -1702),  # T17.0.3 error: missing '#'
            ('#', [], -1703),  # T17.0.2 > T17.1.0 error: missing value after '#'
('# ', [], -1704), # T17.0.2 > T17.1.1 error: unexpected space after '#'
('#f', [], -1705), # T17.0.2 > T17.1.3 error: unrecognizable info after '#'
('#2', [2], 1000), # T17.0.2 > T17.1.2 > T17.2.0 success: valid number of shifts
('#-20', [], -1706), # T17.0.2 > T17.1.2 + override : negative number of shifts
('#040', [], -1706), # T17.0.2 > T17.1.2 + override : too high number of shifts
('#0x1C', [28], 1000), # T17.0.2 > T17.1.2 > T17.2.0 success: hexa number
('#0b10101', [21], 1000), # T17.0.2 > T17.1.2 > T17.2.0 success: binary number
('#0b10020', [], -1002), # T17.0.2 > T17.1.2 + override : invalid binary digit
('#019', [], -1003), # T17.0.2 > T17.1.2 + override : invalid octal digit
('#4d', [], -1004), # T17.0.2 > T17.1.2 + override : invalid decimal digit
('#0xX', [], -1005), # T17.0.2 > T17.1.2 + override : invalid hexa digit
(' #0x1F', [31], 1000) # T17.0.1 > T17.0.2 > T17.1.2 > T17.2.0 success with leading space
]
op2_test = [('', [], -2201), (' ', [], -2203), ('2', [], -2203), ('#', [], -1603), # T22.0.1 + override : missing value after '#'
('# ', [], -1604), # T22.0.1 + override : unexpected space after '#'
('#f', [], -1605), # T22.0.1 + override : unrecognizable info after '#'
('#20', [0x02000014], 1000), # T22.0.1 success: simple byte value
('#\'f\'', [0x02000066], 1000), # T22.0.1 success: simple char value
('#-20', [], -1606), # T22.0.1 + override : impossible fixup for negative number
('#0xC0000034', [0x020001D3], 1000), # T22.0.1 success fixup: split bits
('#0x102', [], -1606), # T22.0.1 + override : impossible fixup for odd rotations
('#\'e\' c', [], -1607), # T22.0.1 + override : unexpected text after imm val.
('#0b1002000', [], -1002), # T22.0.1 + override : invalid binary digit
('#012000900005', [], -1003), # T22.0.1 + override : invalid octal digit
('#45d', [], -1004), # T22.0.1 + override : invalid decimal digit
('#0x4X5', [], -1005), # T22.0.1 + override : invalid hexa digit
('#0x400000000', [], -1006), # T22.0.1 + override : too long value (2^32)
('#\'', [], -1605), # T22.0.1 + override : unclosed char
            ('#\' ', [], -1104),  # T22.0.1 + override : unclosed char
            ('#\'\'', [], -1102),  # T22.0.1 + override : empty char
('#\' 0\'', [], -1105), # T22.0.1 + override : more than one character
            ('#\'\t\'', [], -1103),  # T22.0.1 + override : illegal character '\t'
('#\"t\"', [], -1605), # T22.0.1 + override : illegal character '"'
(' #\'a\'', [0x02000061], 1000), # T22.0.1 success with leading space
('r', [], -1303), # T22.0.2a + override : missing register number
('ra', [], -1304), # T22.0.2a + override : wrong reg number
('r1a', [], -1304), # T22.0.2a + override : wrong reg number
('r-1', [], -1304), # T22.0.2a + override : negative reg number
('r16', [], -1304), # T22.0.2a + override : too high reg number
('r12', [12], 1000), # T22.0.2a success: single reg
('r0 ', [0], 1000), # T22.0.2a success: single reg with trailing space
(' sp', [13], 1000), # T22.0.2a success: single reg with leading space
('r1,', [1], -2204), # T22.0.2b > T22.1.0 error: missing shift mode
('r2, ', [2], -2204), # T22.0.2b > T22.1.1 > T22.1.0 : idem with trailing space
('r3, lslx', [3], -2206), # T22.0.2b > T22.1.1 > T22.1.2c : missing space after shift mode
('r3, r0', [3], -2206), # T22.0.2b > T22.1.1 > T22.1.2c : missing space after shift mode
('r3, #0', [3], -2206), # T22.0.2b > T22.1.1 > T22.1.2c : missing space after shift mode
('r4, xl', [4], -2206), # T22.0.2b > T22.1.1 > T22.1.3 : unrecognized shift mode
('r5, lsl', [5], -2205), # T22.0.2b > T22.1.1 > T22.1.2a : missing space after shift mode
('r6, lsr ', [6], -2205), # > T22.1.2b > T22.2.0 : missing info after shift mode
('r7, asr x', [7], -2207), # > T22.1.2b > T22.2.3 : wrong info after shift mode
('r8, ror r', [8], -1303), # > T22.1.2b > T22.2.1 + override: missing register number
('r9, lsl ra', [9], -1304), # > T22.1.2b > T22.2.1 + override: wrong reg number
('r10, lsr r1a', [10], -1304), # > T22.1.2b > T22.2.1 + override: wrong reg number
('r11, asr r-1', [11], -1304), # > T22.1.2b > T22.2.1 + override: negative reg number
('r12, ror r16', [12], -1304), # > T22.1.2b > T22.2.1 + override: too high reg number
('r13, lsl r12', [0xC1D], 1000), # > T22.1.2b > T22.2.1 success: LSL reg
('sp, lsr r0 ', [0x3D], 1000), # > T22.1.2b > T22.2.1 : LSR reg with trailing space
('r1,asr lr', [0xE51], 1000), # > T22.1.2b > T22.2.1 : ASR reg no space after ','
('r8, ror #', [8], -1703), # > T22.1.2b > T22.2.2 + override: missing value after '#'
('r9, lsl # ', [9], -1704), # > T22.1.2b > T22.2.2 + override: unexpected space after '#'
('r10, lsr #f', [10], -1705), # > T22.1.2b > T22.2.2 + override: unrecognizable info after '#'
('r11, asr #2', [0x14B], 1000), # > T22.1.2b > T22.2.2 success: valid number of shifts
('r12, ror #-20', [12], -1706), # > T22.1.2b > T22.2.2 + override: negative number of shifts
('r13, lsl #040', [13], -1706), # > T22.1.2b > T22.2.2 + override: too high number of shifts
('pc, lsr #0x1C ', [0xE2F], 1000), # > T22.1.2b > T22.2.2 success LSR imm with trailing space
('r1,asr #0b10101', [0xAC1], 1000), # > T22.1.2b > T22.2.2 : ASR bin imm, no space after ','
('r8, ror #0b10020', [8], -1002), # > T22.1.2b > T22.2.2 + override: invalid binary digit
('r9, lsl #019', [9], -1003), # > T22.1.2b > T22.2.2 + override: invalid octal digit
('r10, lsr #4d', [10], -1004), # > T22.1.2b > T22.2.2 + override: invalid decimal digit
('r11, asr #0xX', [11], -1005), # > T22.1.2b > T22.2.2 + override: invalid hexa digit
(' r12 , ror #0x1F ', [0xFEC], 1000), # > T22.1.2b > T22.2.2 success with lead/trail spaces
('r13, lsl r12 a', [13], -1304), # > T22.1.2b > T22.2.1 + override: unexpected text after parse
('r12, ror #0x1F b', [12], -1005) # > T22.1.2b > T22.2.2 + override: idem for immediate parsing
]
opd_test = [('', [], -2301), # T23.0.0 error: missing operands
(' ', [], -2303), # T23.0.2 error: idem with leading space
('2', [], -1302), # T23.0.1a + override : unrecognizable register
('2,', [], -1302), # T23.0.1b + override : unrecognizable operand with ','
('r', [], -1303), # T23.0.1a + override : missing register number
('ra', [], -1304), # T23.0.1a + override : wrong reg number
('r16', [], -1304), # T23.0.1a + override : too high reg number
('r12', [], -2302), # T23.0.1a error: good dest reg, missing other ops
('r0 ', [], -2302), # T23.0.1a error: missing ',' after dest reg
('r1,', [0x1000], -2304), # T23.0.1b > T23.1.0 error: missing source operands
('r2, ', [0x2000], -2306), # T23.0.1b > T23.1.3 error: missing source operands
('r3, 3', [0x3000], -2306), # T23.0.1b > T23.1.3 error: wrong source op 1
('r4, ra', [0x4000], -1304), # T23.0.1b > T23.1.1 > T23.1.2a + override : wrong reg number
('r5, r1a', [0x5000], -1304), # T23.0.1b > T23.1.1 > T23.1.2a + override : wrong reg number
('r6, r-1', [0x6000], -1304), # T23.0.1b > T23.1.1 > T23.1.2a + override : negative reg number
('r7, r16', [0x7000], -1304), # T23.0.1b > T23.1.1 > T23.1.2a + override : too high reg number
('r8, r12', [0x8800C], 1000), # T23.0.1b > T23.1.1 success: two registers
('r9,r1 ', [0x99001], 1000), # T23.0.1b > T23.1.1 success: idem with no space after ','
(' sp , lr ', [0xDD00E], 1000), # T23.0.1b > T23.1.1 success: idem with extra spaces
('r10, r1,', [0x0A000], -2204), # T23.0.1b > T23.1.1 + override : missing shift register
('r11, r2, ', [0x0B000], -2204), # T23.0.1b > T23.1.1 + override : idem with space
('r12, r3, 3', [0x3C000], -2308), # T23.0.1b > T23.1.2b > T23.2.2 error: wrong op 2
('r13, r4, ra', [0x4D000], -1304), # T23.0.1b > T23.1.2b > T23.2.1 + override : wrong reg number
('r14, r5, r1a', [0x5E000], -1304), # T23.0.1b > T23.1.2b > T23.2.1 + override : wrong reg number
('r15, r6, r-1', [0x6F000], -1304), # T23.0.1b > T23.1.2b > T23.2.1 + override : negative reg number
('r0, r7, r16', [0x70000], -1304), # T23.0.1b > T23.1.2b > T23.2.1 + override : too high reg number
('r1, r8, r12', [0x8100C], 1000), # T23.0.1b > T23.1.2b > T23.2.1 success: three registers
('r2,r9,r1 ', [0x92001], 1000), # T23.0.1b > T23.1.2a : idem with no space after ','
('r3, #', [0x03000], -1603), # T23.0.1b > T23.1.1 + override : missing value after '#'
('r4, # ', [0x04000], -1604), # T23.0.1b > T23.1.1 + override : unexpected space after '#'
('r5, #f', [0x05000], -1605), # T23.0.1b > T23.1.1 + override : unrecognizable info after '#'
('r6, #20', [0x02066014], 1000), # T23.0.1b > T23.1.1 success: dest reg + immediate value
('r7, #\'f\'', [0x02077066], 1000), # T23.0.1b > T23.1.1 success: dest reg + immediate char
('r8, #-20', [0x08000], -1606), # T23.0.1b > T23.1.1 + override : impossible fixup for negative num.
('r9,#0xC0000034', [0x020991D3], 1000), # T23.0.1b > T23.1.1 success fixup: split bits
('r10, #0x102', [0x0A000], -1606), # T23.0.1b > T23.1.1 + override : impossible fixup for odd rotations
('r11, #\'e\' c', [0xB000], -1607), # T23.0.1b > T23.1.1 + override : unexpected text after imm val.
('r12, #0b1002000', [0x0C000], -1002), # T23.0.1b > T23.1.1 + override : invalid binary digit
('r13, #012000900005', [0x0D000], -1003), # > T23.1.1 + override : invalid octal digit
('r14, #45d', [0x0E000], -1004), # T23.0.1b > T23.1.1 + override : invalid decimal digit
('r15, #0x4X5', [0x0F000], -1005), # T23.0.1b > T23.1.1 + override : invalid hexa digit
('r0, #\'', [0x0], -1605), # T23.0.1b > T23.1.1 + override : unclosed char
('r1, #\' ', [0x01000], -1104), # T23.0.1b > T23.1.1 + override : unclosed char
('r2, #\'\'', [0x02000], -1102), # T23.0.1b > T23.1.1 + override : empty char
('r3, #\' 0\'', [0x03000], -1105), # T23.0.1b > T23.1.1 + override : more than one character
            ('r4, #\'\t\'', [0x04000], -1103),  # T23.0.1b > T23.1.1 + override : illegal character '\t'
('r5, lslx', [0x05000], -2306), # T23.0.1b > T23.1.3 error: unrecognized source operand
('r5, r10, lslx', [0xA5000], -2308), # T23.0.1b > T23.1.2b > T23.2.2 error: wrong second operand
('r5, r10, r1', [0xA5001], 1000), # T23.0.1b > T23.1.2b > T23.2.1 success: three registers
('r5, r10, #2', [0x20A5002], 1000), # T23.0.1b > T23.1.2b > T23.2.1 success: two regs, one immediate
('r6, r1, xl', [0x16000], -2308), # T23.0.1b > T23.1.2b > T23.2.2 error: wrong second operand
('r7, r2, lsl', [0x07000], -2205), # T23.0.1b > T23.1.1 + override : missing space after shift mode
('r8, r3, lsr ', [0x08000], -2205), # T23.0.1b > T23.1.1 + override : missing info after shift mode
('r9, r4, asr x', [0x09000], -2207), # T23.0.1b > T23.1.1 + override : wrong info after shift mode
('r10, r5, ror r', [0x0A000], -1303), # T23.0.1b > T23.1.1 + override : missing register number
('r11, r6, lsl ra', [0x0B000], -1304), # T23.0.1b > T23.1.1 + override : wrong reg number
('r12, r7, ror r16', [0x0C000], -1304), # T23.0.1b > T23.1.1 + override : too high reg number
('r13, r8, lsl r12', [0xDDC18], 1000), # T23.0.1b > T23.1.1 success: LSL reg
('r14, sp, lsr r0 ', [0xEE03D], 1000), # T23.0.1b > T23.1.1 : LSR reg with trailing space
('r15, r1,asr lr', [0xFFE51], 1000), # T23.0.1b > T23.1.1 : ASR reg no space after ','
('r0, r8, ror #', [0], -1703), # T23.0.1b > T23.1.1 + override : missing value after '#'
('r1, r9, lsl # ', [0x01000], -1704), # T23.0.1b > T23.1.1 + override : unexpected space after '#'
('r2, r10, lsr #f', [0x02000], -1705), # T23.0.1b > T23.1.1 + override : unrecognizable info after '#'
('r3, r11, asr #2', [0x3314B], 1000), # T23.0.1b > T23.1.1 success: valid number of shifts
('r4, r12, ror #-20', [0x04000], -1706), # > T23.1.1 + override : negative number of shifts
('r5, r13, lsl #040', [0x05000], -1706), # > T23.1.1 + override : too high number of shifts
('r5, r13, lsl #00', [0x05500D], 1000), # > T23.1.1 success: true LSL #0
('r6, pc, lsr #0x1C ', [0x66E2F], 1000), # > T23.1.1 success LSR imm with trailing space
('r6, pc, lsr #0x0 ', [0x6600F], 1000), # > T23.1.1 converting LSR #0 into LSL #0
('r7,r1,asr #0b10101', [0x77AC1], 1000), # > T23.1.1 : ASR bin imm, no space after ','
('r7,r1,asr #0b0', [0x77001], 1000), # > T23.1.1 converting ASR #0 into LSL #0
('r8, r13, lsl r12 a', [0x08000], -1304), # > T23.1.1 + override : unexpected text after parse
('r9, r12, ror #0x1F b', [0x09000], -1005), # > T23.1.1 + override : idem for immediate parsing
('r9, r12, ror #0x1F', [0x99FEC], 1000), # > T23.1.1 success ROR with 31 shifts
('r9, r12, ror #0x0', [0x9906C], 1000), # > T23.1.1 coding ROR #0 as RRX
('r13, r7, r8, lsl r12 ', [0x7DC18], 1000), # > T23.1.2 > T23.2.1 success: three regs, last shift reg
('r14 , r8 , sp , lsr r10', [0x8EA3D], 1000), # > T23.1.2 > T23.2.1 : idem with trailing spaces
('r15,r9,r1,asr lr', [0x9FE51], 1000), # > T23.1.2 > T23.2.1 : idem with space after ','
('r13, r7, r8, lsl #12 ', [0x7D608], 1000), # > T23.1.2 > T23.2.1 success: three regs, last shift imm
('r14 , r8 , sp , lsr #10', [0x8E52D], 1000), # > T23.1.2 > T23.2.1 : idem with trailing spaces
('r15,r9,r1,asr #31', [0x9FFC1], 1000), # > T23.1.2 > T23.2.1 : idem with space after ','
('r15,r9,r1,asr r32', [0x9F000], -1304), # > T23.1.2 > T23.2.1 + override : wrong range reg number
('r15,r9,r1,asr #32', [0x9F000], -1706), # > T23.1.2 > T23.2.1 + override : invalid number of shifts
('r15,r9,r1,asr r', [0x9F000], -1303), # > T23.1.2 > T23.2.1 + override : missing reg number
('r15,r9,r1,asr ', [0x9F000], -2205) # > T23.1.2 > T23.2.1 + override : missing info after shift
]
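
# opd_test pins down the zero-shift conventions visible in the encodings
# above: a written LSR #0 or ASR #0 is stored as LSL #0, and ROR #0 is the
# encoding reserved for RRX.  A sketch of the mnemonic-level rule (an
# inference from the expected words, not code taken from the assembler):
def canonicalize_shift(kind, amount):
    if amount == 0 and kind in ('lsr', 'asr'):
        return 'lsl', 0
    return kind, amount                 # ROR #0 stays: it encodes RRX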
idt_test = [('', [], -3101), # T31.0.0 error: missing data instruction
(' ', [], -3101), # T31.0.1 > T31.0.0 error: idem with leading space
('2', [], -3103), # T31.0.3 error: unrecognizable instruction
('and', [], -3102), # T31.0.2a error: missing operands after instr.
('eor ', [4, 0xE0200000], -3102), # T31.0.2b > T31.3.0 error: missing operands after instr.
('sub 2,', [4, 0xE0400000], -1302), # T31.0.2b > T31.3.1 + override : unrecognizable operand with ','
('rsb r', [4, 0xE0600000], -1303), # T31.0.2b > T31.3.1 + override : missing register number
('add r16', [4, 0xE0800000], -1304), # T31.0.2b > T31.3.1 + override : too high reg number
('adc r12', [4, 0xE0A00000], -2302), # T31.0.2b > T31.3.1 + override : good dest reg, missing other ops
('sbc ', [4, 0xE0C00000], -2303), # T31.0.2b > T31.3.1 + override : missing dest reg
('rsc r1,', [4, 0xE0E00000], -2304), # T31.0.2b > T31.3.1 + override : missing source operands
('orr r2, ', [4, 0xE1800000], -2306), # T31.0.2b > T31.3.1 + override : missing source operands
('bic r3, 3', [4, 0xE1C00000], -2306), # T31.0.2b > T31.3.1 + override : wrong source op 1
('and r12, r3, 3', [4, 0xE0000000], -2308), # > T31.3.1 + override : wrong op 2
('eor r3, #', [4, 0xE0200000], -1603), # > T31.3.1 + override : missing value after '#'
('sub r4, # ', [4, 0xE0400000], -1604), # > T31.3.1 + override : unexpected space after '#'
('rsb r5, #f', [4, 0xE0600000], -1605), # > T31.3.1 + override : unrecognizable info after '#'
('add r10, #0x102', [4, 0xE0800000], -1606), # > T31.3.1 + override : impossible fixup for odd rotations
('adc r11, #\'e\' c', [4, 0xE0A00000], -1607), # > T31.3.1 + override : unexpected text after imm val.
('sbc r10, r1,', [4, 0xE0C00000], -2204), # > T31.3.1 + override : missing shift register
('rsc r7, r2, lsl', [4, 0xE0E00000], -2205), # > T31.3.1 + override : missing space after shift mode
('orr r9, r4, asr x', [4, 0xE1800000], -2207), # > T31.3.1 + override : wrong info after shift mode
('bic r0, r8, ror #', [4, 0xE1C00000], -1703), # > T31.3.1 + override : missing value after '#'
('and r1, r9, lsl # ', [4, 0xE0000000], -1704), # > T31.3.1 + override : unexpected space after '#'
('eor r2, r10, lsr #f', [4, 0xE0200000], -1705), # > T31.3.1 + override : unrecognizable info after '#'
('sub r4, r12, ror #-20', [4, 0xE0400000], -1706), # > T31.3.1 + override : negative number of shifts
('rsb r12, #0b1002000', [4, 0xE0600000], -1002), # > T31.3.1 + override : invalid binary digit
('add r13, #012000900005', [4, 0xE0800000], -1003), # > T31.3.1 + override : invalid octal digit
('adc r14, #45d', [4, 0xE0A00000], -1004), # > T31.3.1 + override : invalid decimal digit
('sbc r15, #0x4X5', [4, 0xE0C00000], -1005), # > T31.3.1 + override : invalid hexa digit
('rsc r2, #\'\'', [4, 0xE0E00000], -1102), # > T31.3.1 + override : empty char
            ('orr r4, #\'\t\'', [4, 0xE1800000], -1103),  # > T31.3.1 + override : illegal character '\t'
('bic r1, #\' ', [4, 0xE1C00000], -1104), # > T31.3.1 + override : unclosed char
('and r3, #\' 0\'', [4, 0xE0000000], -1105), # > T31.3.1 + override : more than one character
('eors', [4, 0xE0200000], -3102), # T31.0.2c > T31.1.2a error: data operands
('eoral', [4, 0xE0200000], -3102), # T31.0.2c > T31.1.1a error: data operands
('tsts', [4, 0xE1100000], -3102), # T31.0.2c > T31.1.2a : missing operands
('tsts ', [4, 0xE1100000], -3102), # T31.0.2c > T31.1.2b > T31.3.0 : missing operands
('teqst', [4, 0xE1300000], -3105), # T31.0.2c > T31.1.2c error: wrong text after instruction
('cmpxx', [4, 0xE1500000], -3104), # T31.0.2c > T31.1.3 error: unknown instruction condition
('cmneq', [4, 0xE1700000], -3102), # T31.0.2c > T31.1.1a error: missing ops after pred.inst.
('movne ', [4, 0x11A00000], -3102), # T31.0.2c > T31.1.1b > T31.3.0 : idem after space
('mvncss', [4, 0x21E00000], -3102), # T31.0.2c > T31.1.1c > T31.2.1a : idem after set flag
('mvncsx', [4, 0x21E00000], -3105), # T31.0.2c > T31.1.1c > T31.2.2 : wrong text after pred.inst
('mvncssx', [4, 0x21E00000], -3105), # T31.0.2c > T31.1.1c > T31.2.1c : wrong text after pred.inst + flag
('andhss', [4, 0x20000000], -3102), # T31.0.2c > T31.1.1c > T31.2.1a : missing operands after set flag
('andhss ', [4, 0x20100000], -3102), # T31.0.2c > T31.1.1c > T31.2.1b > T31.3.0 : after set flag + space
('eorccx', [4, 0x30200000], -3105), # T31.0.2c > T31.1.1c > T31.2.2 : wrong text after pred.inst
('sublosx', [4, 0x30400000], -3105), # T31.0.2c > T31.1.1c > T31.2.1c : wrong text after pred.inst + flag
('cmp', [], -3102), # T31.0.2a error: missing operands after instr.
('cmn ', [4, 0xE1700000], -3102), # T31.0.2b > T31.3.0 error: missing operands after instr.
('mov 2,', [4, 0xE1A00000], -1302), # T31.0.2b > T31.3.1 + override : unrecognizable operand with ','
('mvn r', [4, 0xE1E00000], -1303), # T31.0.2b > T31.3.1 + override : missing register number
('tst r16', [4, 0xE1100000], -1304), # T31.0.2b > T31.3.1 + override : too high reg number
('teq r12', [4, 0xE1300000], -2302), # T31.0.2b > T31.3.1 + override : good dest reg, missing other ops
('cmp ', [4, 0xE1500000], -2303), # T31.0.2b > T31.3.1 + override : missing source 1 reg
('cmn r1,', [4, 0xE1700000], -2304), # T31.0.2b > T31.3.1 + override : missing source operands
('mov r2, ', [4, 0xE1A00000], -2306), # T31.0.2b > T31.3.1 + override : missing source operands
('mvn r3, 3', [4, 0xE1E00000], -2306), # T31.0.2b > T31.3.1 + override : wrong source op 1
('tst r3, #', [4, 0xE1100000], -1603), # > T31.3.1 + override : missing value after '#'
('teq r4, # ', [4, 0xE1300000], -1604), # > T31.3.1 + override : unexpected space after '#'
('cmp r5, #f', [4, 0xE1500000], -1605), # > T31.3.1 + override : unrecognizable info after '#'
('mov r10, #0x102', [4, 0xE1A00000], -1606), # > T31.3.1 + override : impossible fixup for odd rotations
('mvn r11, #\'e\' c', [4, 0xE1E00000], -1607), # > T31.3.1 + override : unexpected text after imm val.
('tst r7, r2, lsl', [4, 0xE1100000], -2205), # > T31.3.1 + override : missing space after shift mode
('teq r9, r4, asr x', [4, 0xE1300000], -2207), # > T31.3.1 + override : wrong info after shift mode
('cmp r0, r8, ror #', [4, 0xE1500000], -1703), # > T31.3.1 + override : missing value after '#'
('cmn r1, r9, lsl # ', [4, 0xE1700000], -1704), # > T31.3.1 + override : unexpected space after '#'
('mov r2, r10, lsr #f', [4, 0xE1A00000], -1705), # > T31.3.1 + override : unrecognizable info after '#'
('mvn r4, r12, ror #-20', [4, 0xE1E00000], -1706), # > T31.3.1 + override : negative number of shifts
('tst r12, #0b1002000', [4, 0xE1100000], -1002), # > T31.3.1 + override : invalid binary digit
('teq r13, #012000900005', [4, 0xE1300000], -1003), # > T31.3.1 + override : invalid octal digit
('cmp r14, #45d', [4, 0xE1500000], -1004), # > T31.3.1 + override : invalid decimal digit
('cmn r15, #0x4X5', [4, 0xE1700000], -1005), # > T31.3.1 + override : invalid hexa digit
('mov r2, #\'\'', [4, 0xE1A00000], -1102), # > T31.3.1 + override : empty char
            ('mvn r4, #\'\t\'', [4, 0xE1E00000], -1103),  # > T31.3.1 + override : illegal character '\t'
('tst r1, #\' ', [4, 0xE1100000], -1104), # > T31.3.1 + override : unclosed char
('teq r3, #\' 0\'', [4, 0xE1300000], -1105), # > T31.3.1 + override : more than one character
('eorsx', [4, 0xE0200000], -3105), # T31.0.2c > T31.1.2c error: wrong text after 's'
('eorx', [4, 0xE0200000], -3104), # T31.0.2c > T31.1.3 error: wrong text after inst.
('rsb r5, r10, #2', [4, 0xE26A5002], 1000), # T31.0.2b > T31.3.1 success: two regs, one immediate
('add r13, r8, lsl r12', [4, 0xE08DDC18], 1000), # T31.0.2b > T31.3.1 : LSL reg
('adc r14, sp, lsr r0 ', [4, 0xE0AEE03D], 1000), # T31.0.2b > T31.3.1 : LSR reg with trailing space
('sbc r15, r1,asr lr', [4, 0xE0CFFE51], 1000), # T31.0.2b > T31.3.1 : ASR reg no space after ','
('rsc r6, pc, lsr #0x1C ', [4, 0xE0E66E2F], 1000), # T31.0.2b > T31.3.1 : LSR imm with trailing space
('rsc r6, pc, lsr #0x0 ', [4, 0xE0E6600F], 1000), # : LSR #0 -> LSL #0
('orrs r7,r1,asr #0b10101', [4, 0xE1977AC1], 1000), # > T31.1.2b > T31.3.1:ASR bin imm, no space after ','
('orrs r7,r1,asr #0b0', [4, 0xE1977001], 1000), # : ASR #0 -> LSL #0
('bicmi r13, r7, r8, lsl r12 ', [4, 0x41C7DC18], 1000), # > T31.1.1b > T31.3.1 : three regs, shift reg
('andpls r14 , r8 , sp , lsr r10', [4, 0x5018EA3D], 1000), # > T31.1.1c > T31.2.1b > T31.3.1 : cond. + 's'
('eorvss r15,r9,#\'f\'', [4, 0x6239F066], 1000), # > T31.1.1c > T31.2.1b > T31.3.1 : cond.+'s'+ imm.
('subvc r9,#0xC0000034', [4, 0x724991D3], 1000), # T31.0.2c > T31.1.1b > T31.3.1 : one reg + one imm.
('rsbhis r8 , sp , lsr #10', [4, 0x8078852D], 1000), # > T31.1.1c > T31.2.1b > T31.3.1: reg + shifted reg
('addls r9,r1,asr r15', [4, 0x90899F51], 1000), # > T31.1.1b > T31.3.1 : idem with no 's'
('tst r7,r1, #0b10101', [4, 0xE1100000], -2310), # T31.0.2b > T31.3.1 + override : 3 ops with 'tst'
('teq r13,r7,r8,lsl r12', [4, 0xE1300000], -2310), # T31.0.2b > T31.3.1 + override : 3 ops with 'teq'
('cmppl r14,r8,sp,lsr r10', [4, 0x51500000], -2310), # T31.0.2b > T31.3.1 + override : 3 ops with 'cmp'
('cmnvss r15,r9,#\'f\'', [4, 0x61700000], -2310), # T31.0.2b > T31.3.1 + override : 3 ops with 'cmn'
('movvc r1,r9, #0xC000', [4, 0x71A00000], -2311), # T31.0.2b > T31.3.1 + override : 3 ops with 'mov'
            ('mvnhis r8, lr, sp, lsr pc', [4, 0x81F00000], -2311),  # > T31.3.1 + override : 3 ops with 'mvn'
('tst r7, #0b10101', [4, 0xE3170015], 1000), # T31.0.2b > T31.3.1 : 'tst' + reg + imm
('teqlss r7,r8,lsl r12', [4, 0x91370C18], 1000), # > T31.1.1c > T31.2.1b > T31.3.1: 'teq'+reg+shifted reg
('cmpge r14, r8', [4, 0xA15E0008], 1000), # > T31.1.1c > T31.3.1 : 'cmp' + reg + reg
('cmnlt r15, #\'f\'', [4, 0xB37F0066], 1000), # > T31.1.1c > T31.3.1 : 'cmn' + reg + char
('movgts r1, #0xC000', [4, 0xC3B01903], 1000), # > T31.1.1c > T31.2.1b > T31.3.1: 'mov' + reg + imm
('mvnle lr, sp, lsr #15', [4, 0xD1E0E7AD], 1000), # > T31.1.1c > T31.3.1 : 'mvn'+reg+shifted reg
('mov r2, #-1', [4, 0xE3E02000], 1000), # T31.0.2b > T31.3.1 : 'mov' + reg + NOT imm
('mvn r3, #0xFFF00FFF', [4, 0xE3A03AFF], 1000), # T31.0.2b > T31.3.1 : 'mvn' + reg + NOT imm
('and r4, #-200', [4, 0xE3C440C7], 1000), # T31.0.2b > T31.3.1 : 'and' + reg + NOT imm
('bic r5, #0xFFC03FFF', [4, 0xE20559FF], 1000), # T31.0.2b > T31.3.1 : 'bic' + reg + NOT imm
('add r6, #-300', [4, 0xE2466F4B], 1000), # T31.0.2b > T31.3.1 : 'add' + reg + NOT imm
            ('sub r7, #0xFF100000', [4, 0xE287760F], 1000),  # T31.0.2b > T31.3.1 : 'sub' + reg + NOT imm
('cmp r8, #-1000', [4, 0xE3780FFA], 1000), # T31.0.2b > T31.3.1 : 'cmp' + reg + NOT imm
('cmn r9, #0xFFC04000', [4, 0xE35909FF], 1000) # T31.0.2b > T31.3.1 : 'cmn' + reg + NOT imm
]
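
# The tail of idt_test shows the usual complement substitutions for immediates
# that will not encode directly: MOV/MVN and AND/BIC swap using the bitwise
# NOT of the value, while ADD/SUB and CMP/CMN swap using its negation.  A
# sketch of the selection, reusing the encode_immediate() sketched earlier:
NOT_PAIRS = {'mov': 'mvn', 'mvn': 'mov', 'and': 'bic', 'bic': 'and'}
NEG_PAIRS = {'add': 'sub', 'sub': 'add', 'cmp': 'cmn', 'cmn': 'cmp'}

def fix_immediate(mnemonic, value):
    enc = encode_immediate(value)
    if enc is not None:
        return mnemonic, enc
    if mnemonic in NOT_PAIRS:
        alt = encode_immediate(~value & 0xFFFFFFFF)
        return (NOT_PAIRS[mnemonic], alt) if alt is not None else None
    if mnemonic in NEG_PAIRS:
        alt = encode_immediate(-value & 0xFFFFFFFF)
        return (NEG_PAIRS[mnemonic], alt) if alt is not None else None
    return None

assert fix_immediate('mov', -1) == ('mvn', 0x000)     # 'mov r2, #-1'
assert fix_immediate('and', -200) == ('bic', 0x0C7)   # 'and r4, #-200'
assert fix_immediate('cmp', -1000) == ('cmn', 0xFFA)  # 'cmp r8, #-1000'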
iml_test = [('', [], -3201), # T32.0.0 error: missing multiplication instr.
(' ', [], -3201), # T32.0.1 > T32.0.0 error: idem with leading space
('2', [], -3203), # T32.0.3 error: unrecognizable instruction
('mul', [], -3202), # T32.0.2a error: missing operands after instr.
('mla ', [4, 0xE0200090], -3202), # T32.0.2b > T32.3.0 error: missing operands after instr.
('umull 2,', [4, 0xE0800090], -1302), # T32.0.2b > T32.3.1b + override : unrecognizable operand with ','
('smull r', [4, 0xE0C00090], -1303), # T32.0.2b > T32.3.1b + override : missing register number
('umlal r16', [4, 0xE0A00090], -1304), # T32.0.2b > T32.3.1b + override : too high reg number
('smlal r12', [4, 0xE0E00090], -3202), # T32.0.2b > T32.3.1a error: good dest reg, missing other ops
('mul ', [4, 0xE0000090], -1301), # T32.0.2b > T32.3.1a + override : missing reg1
('mla r1,', [4, 0xE0210090], -3202), # T32.0.2b > T32.3.1b > T32.4.0 : missing source operands
('umull r2, ', [4, 0xE0802090], -1301), # > T32.4.1b + override : missing reg2
('smull r3, gu', [4, 0xE0C03090], -1302), # > T32.4.1b + override : wrong op 2
('umlal r12, r3, e3', [4, 0xE0A3C090], -1302), # > T32.5.1b + override : wrong op 3
('smlal r3, r4, r5, ', [4, 0xE0E43095], -1301), # > T32.6.1 + override : missing reg4
('mul r3, r4, r5, r6', [4, 0xE0030594], -3207), # > T32.6.1 + override : four regs with 'mul'
('mla r3, r4, r5', [4, 0xE0230594], -3202), # > T32.6.1 + override : three regs with 'mla'
('mul r3, r4, r5', [4, 0xE0030594], 1000), # > T32.5.1a success: three regs with 'mul'
('mla r3, r4, r5, r6', [4, 0xE0236594], 1000), # > T32.6.1 success: four regs with 'mla'
('umull r10, r11, r12, r13', [4, 0xE08BAD9C], 1000), # > T32.6.1 : four regs with 'umull'
('umlal r1, r11, r2, r3', [4, 0xE0AB1392], 1000), # > T32.6.1 : four regs with 'umlal'
('smull r10, r11, lr, r10', [4, 0xE0CBAA9E], 1000), # > T32.6.1 : four regs with 'smull'
('smlal sp, lr, r0, r7', [4, 0xE0EED790], 1000), # > T32.6.1 : four regs with 'smlal'
('mul pc, r0, r7', [4, 0xE0000090], -3208), # > T32.5.1a + override : use of PC as Rd
('mul r0, pc, r8', [4, 0xE0000090], -3208), # > T32.5.1a + override : use of PC as Rm
('mla r0, r7, pc', [4, 0xE0200097], -3208), # > T32.5.1a + override : use of PC as Rs
('umlal r10, pc, r6, r9', [4, 0xE0A0A090], -3208), # + override : use of PC as RdHi
('smlal pc, r9, r8, r7', [4, 0xE0E00090], -3208), # + override : use of PC as RdLo
('mul r3, r3, r5', [4, 0xE0030593], 1000), # + warning : Rd should be different from Rm
('mla r5, r5, r5, r1', [4, 0xE0251595], 1000), # + warning : Rd should be different from Rm
('mla r3, r4, r3, r4', [4, 0xE0234394], 1000), # success : should work
('mla r3, r4, r3, r3', [4, 0xE0233394], 1000), # success : should work
            ('umull r6, r7, r7, r6', [4, 0xE0876697], 1000),  # + warning : RdHi, RdLo and Rm must all be different
            ('smull r9, r10, r9,r9', [4, 0xE0CA9999], 1000),  # + warning : RdHi, RdLo and Rm must all be different
('umlal r6, r6, r7, r6', [4, 0xE0A66697], 1000), # + warning : RdHi and RdLo must be different
('smlal r8, r9, r10,r8', [4, 0xE0E9889A], 1000), # success : should work
('muleq', [4, 0xE0000090], -3202), # T32.0.2c > T32.1.1a error : cond & missing ops
            ('muls', [4, 0xE0000090], -3202),  # T32.0.2c > T32.1.2a error : 's' & missing ops
('mulz', [4, 0xE0000090], -3204), # T32.0.2c > T32.1.3 error : wrong text after
('muleqs', [4, 0x00000090], -3202), # > T32.1.1c > T32.2.1a error : missing ops
            ('muleqsz', [4, 0x00000090], -3205),  # > T32.1.2b > T32.2.1c error : wrong text after 's'
('smull r3, r4', [4, 0xE0C03090], -3202), # > T32.4.1a error : missing ops
('smull r3, r4,', [4, 0xE0C43090], -3202), # > T32.5.0 error : missing ops
('smull r3, r4, r5', [4, 0xE0C43095], -3202), # > T32.5.1a error : missing ops
('smull r3, r4, r5,', [4, 0xE0C43095], -3202), # > T32.6.0 error : missing ops
('muleq r3, r4, r5', [4, 0x00030594], 1000), # T32.0.2c > T32.1.1b > success : 'mul' + cond
('mlanes r3, r4, r5, r6', [4, 0x10336594], 1000), # > T32.1.1c > T32.2.1b > : 'mla' + cond + 's'
('umulls r10, r11, r12, r13', [4, 0xE09BAD9C], 1000), # T32.0.2c > T32.1.2b > : 'umull' + 's'
('umlalle r1, r11, r2, r3', [4, 0xD0AB1392], 1000), # T32.0.2c > T32.1.1b > : 'umlal' + cond
('smulllex r10, r11, lr, r10', [4, 0xD0C00090], -3205), # T32.0.2c > T32.1.1c > T32.2.2 : error after cond
('smlalsy sp, lr, r0, r7', [4, 0xE0E00090], -3205) # T32.0.2c > T32.1.2c : error after 's'
]
ibr_test = [('', [], -3301), # T33.0.0 error: missing branch instr.
(' ', [], -3301), # T33.0.1 > T33.0.0 error: idem with leading space
('2', [], -3303), # T33.0.5 error: unrecognizable instruction
('blo', [], -3302), # T33.0.2a error: missing offset after instr.
('bleq ', [4, 0x0B000000], -3302), # T33.0.2b > T33.3.0 : missing offset after instr.
('blox', [4], -3303), # T33.0.2c error: unexpected text after instr.
('bx', [], -3304), # T33.0.3a error: missing reg after instr.
('blx ', [4, 0xE12FFF30], -3304), # T33.0.3b > T33.4.0 error: missing reg after instr.
('blxo', [4, 0xE12FFF30], -3303), # T33.0.3c > T33.2.2 error: unexpected text after instr.
('b', [], -3302), # T33.0.4a error: missing offset after instr.
('bl ', [4, 0xEB000000], -3302), # T33.0.4b > T33.3.0 error: missing offset after instr.
('bly', [4, 0xEB000000], -3303), # T33.0.4c > T33.1.2 error: unexpected text after instr.
('beq', [4, 0xEA000000], -3302), # T33.0.4c > T33.1.1a error: missing offset after instr.
            ('blne ', [4, 0x1B000000], -3302),  # T33.0.4c > T33.1.1b > T33.3.0 : missing offset after instr.
('blnex', [4, 0x1B000000], -3303), # T33.0.4c > T33.1.1c : unexpected text after instr.
('bxeq', [4, 0xE12FFF10], -3302), # T33.0.3c > T33.2.1a error: missing offset after instr.
            ('blxeq ', [4, 0x012FFF30], -3304),  # T33.0.3c > T33.2.1b > T33.4.0 : missing offset after instr.
('blxeqx', [4, 0x012FFF30], -3303), # T33.0.3c > T33.2.1c : unexpected text after instr.
('blt f', [4, 0xBA000000], -3305), # T33.0.2b > T33.3.2 error: wrong offset
('bls 0b12', [4, 0x9A000000], -1002), # T33.0.2b > T33.3.1 + override : unexpected binary digit
('blls 0192', [4, 0x9B000000], -1003), # > T33.1.1b > T33.3.1 + override: unexpected octal digit
('bllo -192a', [4, 0x3B000000], -1004), # > T33.1.1b > T33.3.1 + override: unexpected decimal digit
('blvc 0xA3G0', [4, 0x7B000000], -1005), # > T33.1.1b > T33.3.1 + override: unexpected hexa digit
('bvc 0xA30000000', [4, 0x7A000000], -1006), # > T33.3.1 + override: too long hex address
('bxvc 0xA300', [4, 0x712FFF10], -1302), # > T33.2.1b > T33.4.1 + override: unrecognized reg
('blxcc r', [4, 0x312FFF30], -1303), # > T33.2.1b > T33.4.1 + override: missing reg number
('bxcc rf', [4, 0x312FFF10], -1304), # > T33.2.1b > T33.4.1 + override: wrong reg number
('bxmi r16', [4, 0x412FFF10], -1304), # > T33.2.1b > T33.4.1 + override: wrong reg number
('bx r6', [4, 0xE12FFF16], 1000), # T33.0.3b > T33.4.1 success: 'bx' jump
('blxpl r6', [4, 0x512FFF36], 1000), # > T33.2.1b > T33.4.1 success: 'blx' jump
('blxlt r15', [4, 0xB12FFF3F], 1000), # > T33.2.1b > T33.4.1 warning: use of pc (r15)
('b 0xA300', [4, 0xEA0028C0], 1000), # T33.0.4b > T33.3.1 success: 'b' jump
('bl -1300', [4, 0xEBFFFEBB], 1000), # T33.0.4b > T33.3.1 success: 'bl' negative jump
('blt 073000000', [4, 0xBA3B0000], 1000), # > T33.3.1 success: 'blt' octal jump
('bleq 0x730000', [4, 0x0B1CC000], 1000), # > T33.3.1 success: 'bleq' hexa jump
('bhi 0xA30000', [4, 0x8A28C000], 1000), # > T33.3.1 success: 'b' jump
('blgt 0x1302', [4, 0xCB000000], -3307), # > T33.3.1 + override : misaligned address
('bllt 0x73000000', [4, 0xBB000000], -3308), # > T33.3.1 + override : out of range offset
('blal -73000000', [4, 0xEB000000], -3308), # > T33.3.1 + override : out of range negative offset
('bal -7300001', [4, 0xEA000000], -3307) # > T33.3.1 + override : misaligned negative address
]
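
# In ibr_test above the 24-bit branch offset field is simply the signed
# target value divided by 4 (these fixtures feed absolute displacements
# rather than PC-relative ones), with -3307 for misaligned and -3308 for
# out-of-range values.  A sketch of that check:
def encode_branch_offset(value):
    if value % 4 != 0:
        return None                      # misaligned (-3307)
    offset = value // 4
    if not -0x800000 <= offset <= 0x7FFFFF:
        return None                      # out of range (-3308)
    return offset & 0xFFFFFF

assert encode_branch_offset(0xA300) == 0x0028C0   # 'b 0xA300'
assert encode_branch_offset(-1300) == 0xFFFEBB    # 'bl -1300'
assert encode_branch_offset(-7300001) is None     # misaligned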
am2_test = [('', [], -2401), # T24.0.0 error: missing addressing mode
(' ', [], -2401), # T24.0.1 > T24.0.0 error: idem with leading space
('2', [], -2402), # T24.0.3 error: missing '['
('[', [], -2403), # T24.0.2 > T24.1.0 error: missing info after '['
('[2', [], -2403), # T24.0.2 > T24.1.2 : unrecognizable register
('[r', [], -1303), # T24.0.2 > T24.1.1a + override : missing register number
('[ra', [], -1304), # T24.0.2 > T24.1.1a + override : wrong reg number
('[r16', [], -1304), # T24.0.2 > T24.1.1a + override : too high reg number
('[r12', [], -2404), # T24.0.2 > T24.1.1a error: good base reg, missing closure
('[r0 ', [], -2404), # T24.0.2 > T24.1.1a error: missing ',' after base reg
('[r1,', [0x01810000], -2405), # T24.0.2 > T24.1.1b > T24.2.0 : missing displacement
('[r2]!', [0x01820000], -2410), # T24.0.2 > T24.1.1c > T24.7.2 : unexpected text after ']'
('[r3, 3', [0x01830000], -2406), # > T24.1.1b > T24.2.1 > T24.2.6 : wrong displacement
('[r4, ra', [0x01840000], -1304), # > T24.2.1 > T24.2.5a + override: wrong reg number
('[r5, r1a', [0x01850000], -1304), # > T24.2.1 > T24.2.5a + override: wrong reg number
('[r6, +r1', [0x01860000], -2404), # > T24.2.1 > T24.2.2 > T24.3.1a : check positive reg displ.
('[r7, -r6', [0x01070000], -2404), # > T24.2.1 > T24.2.3 > T24.3.1a : check negative reg displ.
('[r8, -', [0x01080000], -2405), # > T24.2.3 > T24.3.0 : EOSeq after '-'
('[r8, -3.2', [0x01080000], -2406), # > T24.2.3 > T24.3.2 : wrong reg after '-'
('[r5, r10, ', [0x0385000A], -2407), # > T24.2.5b > T24.5.1 > T24.5.0 : missing shift mode
('[r7, r2, lsl', [0x03870002], -2408), # > T24.2.5b > T24.5.1 > T24.5.2a: missing space after shift
('[r8, r3, lsr ', [0x03880003], -2408), # > T24.5.2b > T24.6.0 : missing info after shift mode
('[r10, r5, ror r', [0x038A0005], -1702), # > T24.5.2b > T24.6.2 : idem
('[r1, r9, lsl # ', [0x03810009], -1704), # > T24.5.2b > T24.6.1a + override : unexpected space after '#'
('[r3, r11, asr #2', [0x0383000B], -2404), # > T24.5.2b > T24.6.1a : valid scaled reg, missing ']'
('[r8, #', [0x01880000], -2405), # > T24.2.1 > T24.2.4 > T24.4.0 : missing displacement
('[r4, # ', [0x01840000], -2406), # > T24.2.1 > T24.2.4 > T24.4.2 : unexpected space after '#'
('[r5, #\'f\'', [0x01850000], -2406), # > T24.2.1 > T24.2.4 > T24.4.2 : unrecognizable info after '#'
('[r6, #20', [0x01860000], -2404), # > T24.2.1 > T24.2.4 > T24.4.1a : base + imm. displ., missing ']'
('[r8, #-20', [0x01880000], -2404), # > T24.2.1 > T24.2.4 > T24.4.1a : idem for negative imm. displ.
('[r9,#0xC0000034]', [0x1890000], -2411), # > T24.4.1b + override : too long immediate displacement
('[r12, #0b1002000]', [0x018C0000], -1002), # + override : invalid binary digit
('[r13, #012000900005]', [0x018D0000], -1003), # + override : invalid octal digit
('[r14, #45d]', [0x018E0000], -1004), # + override : invalid decimal digit
('[r15, #0x4X5]', [0x018F0000], -1005), # + override : invalid hexa digit
('[ r6, #+0]', [0x01860000], 1000), # > T24.2.4 > T24.4.1b > T24.7.0 : success base + imm. displ.
('[r6, #20]', [0x01860014], 1000), # > T24.2.4 > T24.4.1b > T24.7.0 : success base + imm. displ.
('[r7, #+4095]', [0x01870FFF], 1000), # > T24.2.4 > T24.4.1b > T24.7.0 : maximum positive imm. displ.
('[r8, #-20]', [0x01080014], 1000), # > T24.2.4 > T24.4.1b > T24.7.0 : base + negative imm. displ.
('[r9, #-4095]', [0x01090FFF], 1000), # > T24.2.4 > T24.4.1b > T24.7.0 : minimum negative imm. displ.
('[r10]', [0x018A0000], 1000), # T24.0.2 > T24.1.1c > T24.7.0 : success base only
('[sp ]', [0x018D0000], 1000), # T24.0.2 > T24.1.1c > T24.7.0 : idem with trailing space
('[r9,r1]', [0x03890001], 1000), # > T24.1.1b > T24.2.5c > T24.7.0: success base + reg. displacement
('[ sp , lr ]', [0x038D000E], 1000), # > T24.1.1b > T24.2.5c > T24.7.0: idem with extra spaces
('[r1, +r6]', [0x03810006], 1000), # > T24.2.2 > T24.3.1c > T24.7.0 : check positive reg displ.
('[r6, -r7]', [0x03060007], 1000), # > T24.2.3 > T24.3.1c > T24.7.0 : check negative reg displ.
('[r5, r15]', [0x01850000], -2412), # > T24.2.5b + override : PC not allowed as Rm
('[r5, r10, ]', [0x0385000A], -2409), # > T24.2.5b > T24.5.1 > T24.5.3 : missing shift mode
('[r5, r10, lslx]', [0x0385000A], -2409), # > T24.2.5b > T24.5.1 > T24.5.3 : wrong shift mode
('[r7, +r2, lsl]', [0x03870002], -2409), # > T24.3.1b > T24.5.1 > T24.5.2c : missing space after shift
('[r8, -r3, lsr ]', [0x03080003], -2409), # > T24.3.1b > T24.6.2 : missing info after shift mode
('[r9, r4, asr x]', [0x03890004], -1702), # > T24.5.2b > T24.6.2 : wrong info after shift mode
('[r0, r8, ror #]', [0x03800008], -1703), # > T24.5.2b > T24.6.1a + override : missing value after '#'
('[r2, r10, lsr #f]', [0x0382000A], -1705), # > T24.5.2b > T24.6.1a + override : unrecogn. info after '#'
('[r4, r12, ror #-20]', [0x0384000C], -1706), # > T24.6.1b + override : negative number of shifts
('[r5, r13, lsl #040]', [0x0385000D], -1706), # > T24.6.1b + override : too high number of shifts
('[r5, r13, lsl #0]', [0x0385000D], 1000), # > T24.6.1b > T24.7.0 : true LSL #0
('[r6, lr, lsr #0x1C] ', [0x03860E2E], 1000), # > T24.6.1b > T24.7.1> T24.7.0: success with trailing space
('[r5, r13, lsl #00]', [0x0385000D], 1000), # > T24.6.1b > T24.7.0 : true LSL #0
('[r6, sp, lsr #0x0 ]', [0x0386000D], 1000), # > T24.6.1b > T24.7.0 : converting LSR #0 into LSL #0
('[r7,-r1,asr #0b10101]', [0x03070AC1], 1000), # : ASR bin imm, no space after ','
('[r7,+r1,asr #0b0]', [0x03870001], 1000), # : converting ASR #0 into LSL #0
('[r9, r12, ror #0x1F]', [0x03890FEC], 1000), # : success ROR with 31 shifts
('[r9, r12, ror #0x0]', [0x0389006C], 1000) # : coding ROR #0 as RRX
]
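# --- illustrative sanity check (added sketch, not part of the original test data) ---
# For base + immediate displacement, the mode-2 tuples above accumulate a partial
# word with P=1 (bit 24), U (bit 23) for the sign, Rn at bits 16-19 and a 12-bit
# absolute offset; this helper re-derives a few expected values from that layout.
def _am2_imm_word(rn, disp):
    u_bit = 0x00800000 if disp >= 0 else 0
    return 0x01000000 | u_bit | (rn << 16) | abs(disp)
assert _am2_imm_word(6, 20) == 0x01860014      # '[r6, #20]'
assert _am2_imm_word(7, 4095) == 0x01870FFF    # '[r7, #+4095]'
assert _am2_imm_word(8, -20) == 0x01080014     # '[r8, #-20]'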
am3_test = [('', [], -2501), # T25.0.0 error: missing addressing mode
(' ', [], -2501), # T25.0.1 > T25.0.0 error: idem with leading space
('2', [], -2502), # T25.0.3 error: missing '['
('[', [], -2503), # T25.0.2 > T25.1.0 error: missing info after '['
('[2', [], -2503), # T25.0.2 > T25.1.2 : unrecognizable register
('[r', [], -1303), # T25.0.2 > T25.1.1a + override : missing register number
('[ra', [], -1304), # T25.0.2 > T25.1.1a + override : wrong reg number
('[r16', [], -1304), # T25.0.2 > T25.1.1a + override : too high reg number
('[r12', [], -2504), # T25.0.2 > T25.1.1a error: good base reg, missing closure
('[r0+', [], -1304), # T25.0.2 > T25.1.1a + override : missing ',' after base reg
('[r1,', [0x01C10000], -2505), # T25.0.2 > T25.1.1b > T25.2.0 : missing displacement
('[r2]!', [0x01C20000], -2510), # T25.0.2 > T25.1.1c > T25.7.2 : unexpected text after ']'
('[r3, 3', [0x01C30000], -2506), # > T25.1.1b > T25.2.1 > T25.2.6 : wrong displacement
('[r4, ra', [0x01C40000], -1304), # > T25.2.1 > T25.2.5a + override: wrong reg number
('[r5, r1a', [0x01C50000], -1304), # > T25.2.1 > T25.2.5a + override: wrong reg number
('[r6, +r1', [0x01C60000], -2504), # > T25.2.1 > T25.2.2 > T25.3.1a : check positive reg displ.
('[r7, -r6', [0x01470000], -2504), # > T25.2.1 > T25.2.3 > T25.3.1a : check negative reg displ.
('[r8, -', [0x01480000], -2505), # > T25.2.3 > T25.3.0 : EOSeq after '-'
('[r8, -3.2', [0x01480000], -2506), # > T25.2.3 > T25.3.2 : wrong reg after '-'
('[r5, r10, ', [0x01C50000], -2513), # > T25.2.5b : scaled reg. displ. not allowed
('[r7, r2, lsl', [0x01C70000], -2513), # > T25.2.5b : idem
('[r8, #', [0x01C80000], -2505), # > T25.2.1 > T25.2.4 > T25.4.0 : missing displacement
('[r4, # ', [0x01C40000], -2506), # > T25.2.1 > T25.2.4 > T25.4.2 : unexpected space after '#'
('[r5, #\'f\'', [0x01C50000], -2506), # > T25.2.1 > T25.2.4 > T25.4.2 : unrecognizable info after '#'
('[r6, #20', [0x01C60000], -2504), # > T25.2.1 > T25.2.4 > T25.4.1a : base + imm. displ., missing ']'
('[r9, #0x134]', [0x1C90000], -2511), # > T25.4.1b + override : too long immediate displacement
('[r12, #0b0001103]', [0x01CC0000], -1002), # + override : invalid binary digit
('[r13, #012009005]', [0x01CD0000], -1003), # + override : invalid octal digit
('[r14, #4+5]', [0x01CE0000], -1004), # + override : invalid decimal digit
('[r15, #0xX45]', [0x01CF0000], -1005), # + override : invalid hexa digit
('[ r6, #+0]', [0x01C60000], 1000), # > T25.2.4 > T25.4.1b > T25.7.0 : success base + imm. displ.
('[r6 ,#195]', [0x01C60C03], 1000), # > T25.2.4 > T25.4.1b > T25.7.0 : success base + imm. displ.
(' [r7, #+255]', [0x01C70F0F], 1000), # > T25.2.4 > T25.4.1b > T25.7.0 : maximum positive imm. displ.
('[r8, # -80]', [0x01480500], 1000), # > T25.2.4 > T25.4.1b > T25.7.0 : base + negative imm. displ.
('[r9, #-255 ]', [0x01490F0F], 1000), # > T25.2.4 > T25.4.1b > T25.7.0 : minimum negative imm. displ.
('[r9,# - 25]', [0x01490109], 1000), # > T25.2.4 > T25.4.1b > T25.7.0 : negative with white spaces
('[r9, # + 25]', [0x01C90109], 1000), # > T25.2.4 > T25.4.1b > T25.7.0 : positive with white spaces
('[r10]', [0x01CA0000], 1000), # T25.0.2 > T25.1.1c > T25.7.0 : success base only
('[sp ]', [0x01CD0000], 1000), # T25.0.2 > T25.1.1c > T25.7.0 : idem with trailing space
('[r9,r1]', [0x01890001], 1000), # > T25.1.1b > T25.2.5c > T25.7.0: success base + reg. displacement
('[ sp , lr ]', [0x018D000E], 1000), # > T25.1.1b > T25.2.5c > T25.7.0: idem with extra spaces
('[r1, +r6]', [0x01810006], 1000), # > T25.2.2 > T25.3.1c > T25.7.0 : check positive reg displ.
('[r1, + r6]', [0x01810006], 1000), # > T25.2.2 > T25.3.1c > T25.7.0 : idem with white space
('[r6, -r7]', [0x01060007], 1000), # > T25.2.3 > T25.3.1c > T25.7.0 : check negative reg displ.
('[r6,- r7] ', [0x01060007], 1000), # > T25.3.1c > T25.7.1 > T25.7.0 : idem with white space
('[r5, r15]', [0x01C50000], -2512), # > T25.2.5b + override : PC not allowed as Rm
('[r5, r10+]', [0x01C50000], -1304), # > T25.2.5b + override : wrong text after reg. number
('[r5, +r10,]', [0x01C50000], -2513) # > T25.2.2 > T25.3.1b : scaled reg. displ. not allowed
]
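# --- illustrative sanity check (added sketch, not part of the original test data) ---
# Mode 3 splits the 8-bit immediate displacement into two nibbles (bits 8-11 and
# bits 0-3), with bit 22 marking the immediate form; layout re-derived from the
# expected words in the tuples above.
def _am3_imm_word(rn, disp):
    u_bit = 0x00800000 if disp >= 0 else 0
    v = abs(disp)
    return 0x01400000 | u_bit | (rn << 16) | ((v >> 4) << 8) | (v & 0xF)
assert _am3_imm_word(6, 195) == 0x01C60C03     # '[r6 ,#195]'
assert _am3_imm_word(8, -80) == 0x01480500     # '[r8, # -80]'
assert _am3_imm_word(9, -255) == 0x01490F0F    # '[r9, #-255 ]'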
im2_test = [('', [], -3401), # T34.0.0 error: missing memory transfer inst.
(' ', [], -3401), # T34.0.1 > T34.0.0 error: idem with leading space
('2', [], -3402), # T34.0.3 error: missing 'ld' or 'st'
('ld', [4, 0xE0000000], -3402), # T34.0.2 > T34.1.0 error: missing inst. continuation
('st ', [4, 0xE0000000], -3402), # T34.0.2 > T34.1.4 error: missing inst. continuation
('str', [4, 0xE0000000], -3403), # T34.0.2 > T34.1.1 > T34.2.0 : missing space after inst.
('ldr ', [4, 0xE4100000], -3405), # > T34.1.1 > T34.2.1 > T34.5.0 : missing destination register
('sts', [4, 0xE0000000], -3408), # T34.0.2 > T34.1.2 + override : 's' not allowed for store inst.
('ldx', [4, 0xE0000000], -3402), # T34.0.2 > T34.1.4 : unrecognized mem. transfer inst.
('ldrb', [4, 0xE0000000], -3403), # > T34.1.1 > T34.2.2 > T34.3.0 : missing space after inst.
('strb ', [4, 0xE4400000], -3405), # > T34.2.2 > T34.3.1 > T34.5.0 : missing destination register
('ldrby', [4, 0xE0000000], -3404), # > T34.2.2 > T34.3.2 : wrong text after inst.
('ldrb e', [4, 0xE4500000], -1302), # > T34.3.1 > T34.5.1a + override: unknown reg
('str r', [4, 0xE4000000], -1303), # > T34.2.1 > T34.5.1a + override: missing reg number
('ldr rb', [4, 0xE4100000], -1304), # > T34.2.1 > T34.5.1a + override: wrong reg number
('ldrb r1', [4, 0xE4500000], -3406), # > T34.2.1 > T34.5.1a error: missing ',' after dest. reg
('strb r2,', [4, 0xE4402000], -3407), # > T34.5.1b > T34.6.0 error: missing info after dest. reg
('streq', [4, 0x00000000], -3403), # > T34.2.3 > T34.4.0 : missing space after inst.
('ldrne ', [4, 0x14100000], -3405), # > T34.2.3 > T34.4.1 > T34.5.0 : missing destination register
('strles', [4, 0xD0000000], -3408), # > T34.2.3 > T34.4.4 + override : 's' not allowed for store inst.
('ldrlox', [4, 0x30000000], -3404), # > T34.2.3 > T34.4.5 : unrecognized mem. transfer inst.
('ldrmib', [4, 0x40000000], -3403), # > T34.2.3 > T34.4.2 > T34.3.0 : missing space after inst.
('strmib ', [4, 0x44400000], -3405), # > T34.4.2 > T34.3.1 > T34.5.0 : missing destination register
('ldrhsbx', [4, 0x20000000], -3404), # > T34.4.2 > T34.3.2 : wrong text after inst.
('ldrhsb r2, 2', [4, 0x24502000], -2402), # > T34.6.1 > T34.6.3 + override : missing '['
('strvcb r3, [', [4, 0x74403000], -2403), # > T34.6.3 + override : missing info after '['
('ldrge r4, [2', [4, 0xA4104000], -2403), # > T34.6.3 + override : unrecognizable register
('strltb r5,[r', [4, 0xB4405000], -1303), # > T34.6.3 + override : missing register number
('ldrvc r6, [r16', [4, 0x74106000], -1304), # + override : too high reg number
('ldr lr, [r12', [4, 0xE410E000], -2404), # + override : good base reg, missing closure
('str sp, [r0 ', [4, 0xE400D000], -2404), # + override : missing ',' after base reg
('ldrb r15, [r1,', [4, 0xE450F000], -2405), # + override : missing displacement
('strb pc, [r2]!', [4, 0xE440F000], -2410), # + override : unexpected text after ']'
('ldrvsb r4,[r3, 3', [4, 0x64504000], -2406), # + override : wrong displacement
('strge r5, [r5, r1a', [4, 0xA4005000], -1304), # + override : wrong reg number
('ldrltb r6, [r5, r10, ', [4, 0xB4506000], -2407), # + override : missing shift mode
('strlsb r7, [r7, r2, lsl', [4, 0x94407000], -2408), # + override : missing space after shift
('strgt r9, [r8, r3, lsr ', [4, 0xC4009000], -2408), # + override : missing info after shift mode
('ldr r11, [r10, r5, ror r', [4, 0xE410B000], -1702), # + override : idem
('ldrb r12, [r1, r9, lsl # ', [4, 0xE450C000], -1704), # + override : unexpected space after '#'
('strb r13,[r9,#0xC0000034]', [4, 0xE440D000], -2411), # + override : too long immediate displacement
('ldr r0, [r12, #0b1002000]', [4, 0xE4100000], -1002), # + override : invalid binary digit
('strhi r1, [r13, #018000005]', [4, 0x84001000], -1003), # + override : invalid octal digit
('strlob r2, [r14, #5d4]', [4, 0x34402000], -1004), # + override : invalid decimal digit
('ldrplb r3, [r15, #0x4r]', [4, 0x54503000], -1005), # + override : invalid hexa digit
('ldrb r3, [r15, #0x400000000]', [4, 0xE4503000], -1006), # + override : too big number
('ldrcsb r4, [ r6, #+0]', [4, 0x25D64000], 1000), # > T34.6.3 : success base + imm. displ.
('ldr r5, [r6, #20]', [4, 0xE5965014], 1000), # : success base + imm. displ.
('str r6,[r7, #+4095]', [4, 0xE5876FFF], 1000), # : maximum positive imm. displ.
('ldreqb r7, [r8, #-20]', [4, 0x05587014], 1000), # : base + negative imm. displ.
('strccb r8, [r9, #-4095] ', [4, 0x35498FFF], 1000), # : minimum negative imm. displ.
('ldr r9, [r10]', [4, 0xE59A9000], 1000), # : success base only
('str r10,[r9,+r1]', [4, 0xE789A001], 1000), # : success base + reg. displacement
('str r10, [r5, r15]', [4, 0xE400A000], -2412), # + override : PC not allowed as Rm
('strb r11, [r0, r8, ror #]', [4, 0xE440B000], -1703), # + override : missing value after '#'
('ldrle r12, [r2, r10, lsr #f]', [4, 0xD410C000], -1705), # + override : unrecogn. info after '#'
('strmib r13, [r4, r12, ror #-20]', [4, 0x4440D000], -1706), # override : negative number of shifts
('ldrplb r14, [r5, r13, lsl #040]', [4, 0x5450E000], -1706), # override : too high number of shifts
('ldrvs r15,[r6, lr, lsr #0x1C] ', [4, 0x6796FE2E], 1000), # : success with trailing space
('str r0, [r5, r13, lsl #00]', [4, 0xE785000D], 1000), # : true LSL #0
('ldr r1, [r6, sp, lsr #0x0 ]', [4, 0xE796100D], 1000), # : converting LSR #0 into LSL #0
('str r2, [r7,-r1,asr #0b10101]', [4, 0xE7072AC1], 1000), # : ASR bin imm, no space after ','
('ldr r3 ,[r7,+r1,asr #0b0]', [4, 0xE7973001], 1000), # : converting ASR #0 into LSL #0
('ldrb r4,[r9, r12, ror #0x1F]', [4, 0xE7D94FEC], 1000), # : success ROR with 31 shifts
('strb r5, [r9, r12, ror #0x0]', [4, 0xE7C9506C], 1000) # : coding ROR #0 as RRX
]
im3_test = [('lds', [4, 0xE0000000], -3404), # T34.0.2 > T34.1.2 > T34.8.0 error: wrong memory transfer inst.
('strz', [4, 0xE0000000], -3404), # T34.0.2 > T34.1.1 > T34.2.6 error: wrong memory transfer inst.
('strs', [4, 0xE0000000], -3408), # > T34.1.1 > T34.2.5 + override : 's' not allowed for store inst.
('strh', [4, 0xE00000B0], -3403), # > T34.1.1 > T34.2.4 > T34.9.0 error: missing space after inst.
('ldrs', [4, 0xE0000000], -3404), # > T34.1.1 > T34.2.5 > T34.10.0 : wrong memory transfer inst.
('ldrh ', [4, 0xE01000B0], -3405), # > T34.2.4 > T34.9.1 > T34.11.0 : missing destination reg
('ldrsb', [4, 0xE01000D0], -3403), # > T34.2.5 > T34.10.1 > T34.9.0 : missing space after inst.
('ldrsh', [4, 0xE01000F0], -3403), # > T34.2.5 > T34.10.1 > T34.9.0 : missing space after inst.
('ldrsi', [4, 0xE0000000], -3404), # > T34.2.5 > T34.10.2 : wrong memory transfer inst.
('ldrsb ', [4, 0xE01000D0], -3405), # > T34.10.1 > T34.9.1 > T34.11.0: missing destination reg
('ldrsb e', [4, 0xE01000D0], -1302), # > T34.11.1a + override : wrong text after inst.
('ldrsbt', [4, 0xE01000D0], -3404), # > T34.10.1 > T34.9.2 : wrong memory transfer inst.
('ldsb', [4, 0xE01000D0], -3403), # > T34.8.2 > T34.9.0 : missing space after inst.
('ldsh ', [4, 0xE01000F0], -3405), # > T34.8.2 > T34.9.1 > T34.11.0 : missing destination reg
('ldsu ', [4, 0xE0000000], -3404), # T34.0.2 > T34.1.2 > T34.8.3 : wrong memory transfer inst.
('strneh', [4, 0x100000B0], -3403), # > T34.2.3 > T34.4.3 > T34.9.0 : missing space after inst.
('ldscc', [4, 0x30000000], -3404), # > T34.1.2 > T34.8.1 > T34.10.0 : wrong memory transfer inst.
('ldreqs', [4, 0x00000000], -3404), # > T34.2.3 > T34.4.4 > T34.10.0 : wrong memory transfer inst.
('ldrlssb', [4, 0x901000D0], -3403), # > T34.4.4 > T34.10.1 > T34.9.0 : missing space after inst.
('ldshsb r2', [4, 0x201000D0], -3406), # > T34.9.1 > T34.11.1a error: missing ',' after destination reg
('ldrhsh r2,', [4, 0x201020B0], -3407), # > T34.11.1b > T34.12.0 : missing info after dest. reg
('strleh r10, r12', [4, 0xD000A0B0], -2502), # T34.11.1b > T34.12.1 + override : missing '['
('strlsh r10, [12', [4, 0x9000A0B0], -2503), # T34.11.1b > T34.12.1 + override : missing reg after '['
('strloh r8, [r12', [4, 0x300080B0], -2504), # T34.11.1b > T34.12.1 + override : missing closure
('streqh r9, [r1,', [4, 0x000090B0], -2505), # T34.11.1b > T34.12.1 + override : missing displacement
('ldsccb r1,[r2]!', [4, 0x301010D0], -2510), # T34.11.1b > T34.12.1 + override: unexpected text after ']'
('strh r2, [r3, 3', [4, 0xE00020B0], -2506), # + override : wrong displacement
('ldsvch r4, [r5, r1a', [4, 0x701040F0], -1304), # + override : wrong reg number
('ldrvssb r5, [r7, -r6', [4, 0x601050D0], -2504), # + override : check negative reg displ.
('strplh r9, [r5, r10, ', [4, 0x500090B0], -2513), # + override : scaled reg. displ. not allowed
('ldsmib r10, [r9, #0x134]', [4, 0x4010A0D0], -2511), # + override : too long immediate displacement
('ldrgtsb r11 , [ r6, #+0]', [4, 0xC1D6B0D0], 1000), # > T34.11.1b > T34.12.1 success: base + imm. displ.
('strh r12, [r6 ,#195]', [4, 0xE1C6CCB3], 1000), # : base + imm. displ.
('ldrlsh r3, [r10, #-180]', [4, 0x915A3BB4], 1000), # : base + negative imm. displ.
('ldsgeh r13, [r8, # -80]', [4, 0xA158D5F0], 1000), # : base + negative imm. displ.
('ldshsb r14,[r9, #-255 ]', [4, 0x2159EFDF], 1000), # : minimum negative imm. displ.
('strhih pc, [r10]', [4, 0x81CAF0B0], 1000), # : success base only
(' ldrgtsh lr, [ pc ]', [4, 0xC1DFE0F0], 1000), # : idem with trailing space
('ldsvsb r10,[r9,r1]', [4, 0x6199A0D1], 1000), # : success base + reg. displacement
('ldrlssh r0, [ sp , lr ]', [4, 0x919D00FE], 1000), # : idem with extra spaces
('strleh r1, [r6, -r7]', [4, 0xD10610B7], 1000), # : check negative reg displ.
('ldsb r9, [r5, r15]', [4, 0xE01090D0], -2512) # + override : PC not allowed as Rm
]
imm_test = [('ldm', [4, 0xE0000000], -3404), # T34.0.2 > T34.1.3 > T34.13.0 error: wrong memory transfer inst.
('stmz', [4, 0xE0000000], -3404), # T34.0.2 > T34.1.3 > T34.13.3 error: wrong memory transfer inst.
('ldmia', [4, 0xE8900000], -3403), # > T34.13.2 > T34.15.0 : missing space after inst.
('stmdb ', [4, 0xE9000000], -3405), # > T34.15.1 > T34.16.0 : missing destination reg
('ldmibe', [4, 0xE9900000], -3404), # > T34.13.2 > T34.15.2 : wrong memory transfer inst.
('ldmib e', [4, 0xE9900000], -1302), # > T34.16.1a + override : wrong register
('stmne', [4, 0x10000000], -3404), # > T34.13.1 > T34.14.0 : wrong memory transfer inst.
('ldmccda', [4, 0x38100000], -3403), # > T34.14.1 > T34.15.0 : missing space after inst.
('ldmccde', [4, 0x30000000], -3404), # > T34.14.2 error: missing space after inst.
('ldmeqia r', [4, 0x08900000], -1303), # > T34.16.1a + override : missing reg number
('ldmhsfd r2', [4, 0x28900000], -3406), # > T34.16.1a error: missing ',' after destination reg
('ldmhsfa r2,', [4, 0x28120000], -3407), # > T34.16.1b > T34.18.0 : missing info after dest. reg
('stmhiea r2!', [4, 0x89020000], -3406), # > T34.16.1c > T34.17.0 : missing ',' after destination reg
('stmhiea r2!,', [4, 0x89220000], -3407), # > T34.17.2 > T34.18.0 : missing info after dest. reg
('stmea r2!d', [4, 0xE9020000], -3404), # > T34.17.3 error: wrong text after '!'
('stmccib r3,1', [4, 0x39830000], -1502), # > T34.18.1 + override : missing '{'
('ldmmied r4!, {', [4, 0x49B40000], -1503), # + override : missing registers
('ldmplia r5, {1', [4, 0x58950000], -1302), # + override : unknown register identifier
('stmneda r6! , {r', [4, 0x18260000], -1303), # > T34.17.1 + override : missing register number
('stmia r7,{ra', [4, 0xE8870000], -1304), # + override : wrong reg number
('ldmfd r8, {r0', [4, 0xE8980000], -1503), # + override : unclosed single register
('stmed r9, {r14,}', [4, 0xE9890000], -1504), # + override : missing register after ','
('ldmfd r13!, {r4-}', [4, 0xE8BD0000], -1403), # + override : missing second reg in range list
('ldmfd r13!, {r14, }', [4, 0xE8BD0000], -1504), # + override : missing register after ', '
('ldmeqda r10!, {r0}', [4, 0x083A0001], 1000), # > T34.18.1 success: single register
('ldmalib r11 , {r0-r5}', [4, 0xE99B003F], 1000), # : single range
('stmccdb r12!, {pc, r1-r2, sp-r12, r5}', [4, 0x392CB026], 1000), # : several ranges, with spaces
('stmea r13!, {r14,r8}', [4, 0xE92D4100], 1000), # : no space after ','
('ldmfd r13!, { r9 , r13 }', [4, 0xE8BD2200], 1000) # : extra spaces
]
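# --- illustrative sanity check (added sketch, not part of the original test data) ---
# Register lists encode as a 16-bit mask (bit n set for rn); the '{sp-r12}' case
# above suggests reversed ranges are normalized before being expanded.
def _reglist_mask(ranges):
    mask = 0
    for lo, hi in ranges:
        lo, hi = min(lo, hi), max(lo, hi)
        for r in range(lo, hi + 1):
            mask |= 1 << r
    return mask
assert _reglist_mask([(0, 5)]) == 0x003F                              # '{r0-r5}'
assert _reglist_mask([(15, 15), (1, 2), (13, 12), (5, 5)]) == 0xB026  # '{pc, r1-r2, sp-r12, r5}'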
iil_test = [('str r0, =', [4, 0xE4000000], -3409), # > T34.6.2 + override : 'str' cannot use '=' loading
('ldrb r0,=', [4, 0xE4500000], -3409), # > T34.6.2 + override : neither 'ldrb'
('ldrh r0,=', [4, 0xE01000B0], -2502), # > T34.12.1 + override error: nor 'ldrh'
('ldr r0, =', [4, 0xE4100000], -3410), # > T34.6.2 > T34.7.0 error: missing number for immediate load
('ldr r0, = ', [4, 0xE4100000], -3410), # > T34.7.1 > T34.7.0 : idem with trailing space
('ldr r0, =t', [4, 0xE4100000], -3410), # > T34.7.1 > T34.7.3 : idem with trailing rubbish
('ldr r1, =0b00130', [4, 0xE4101000], -1002), # > T34.7.2 + override: invalid binary digit
('ldr r2, =00180', [4, 0xE4102000], -1003), # + override: invalid octal digit
('ldr r3, = -18a', [4, 0xE4103000], -1004), # + override: invalid decimal digit
('ldr r4, =0x10GA', [4, 0xE4104000], -1005), # + override: invalid hexa digit
('ldr r5, =0x100000000', [4, 0xE4105000], -1006), # + override: too big number
('ldr r6, =+0', [4, 0xE59F6FF8, 0], 1000), # > T34.7.2 success: set a relative pc loading
('ldrhi r7, = 00317652', [4, 0x859F7FF8, 0x19FAA], 1000), # : octal number
('ldrlt lr, =-1000', [4, 0xB59FEFF8, -1000], 1000), # : negative number
('ldr pc, = 0x8000', [4, 0xE59FFFF8, 0x8000], 1000) # : hexa number (load PC)
]
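# --- illustrative sanity check (added sketch, not part of the original test data) ---
# The '=' loads above appear to encode LDR <rd>, [pc, #0xFF8] plus a literal word
# placed 4 KiB ahead, so the opcode is cond<<28 | 0x059F0FF8 | rd<<12.
def _eq_load_word(cond, rd):
    return (cond << 28) | 0x059F0FF8 | (rd << 12)
assert _eq_load_word(0xE, 6) == 0xE59F6FF8     # 'ldr r6, =+0'
assert _eq_load_word(0x8, 7) == 0x859F7FF8     # 'ldrhi r7, = 00317652'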
imi_test = [('', [], -3501), # T35.0.0 error: missing miscellanea instruction
(' ', [], -3501), # T35.0.1 > T35.0.0 : idem with space
('ldr', [], -3503), # T35.0.4 error: unrecognizable instruction
('push', [], -3502), # T35.0.2a error: missing operands
(' clz', [], -3502), # T35.0.1 > T35.0.3a error: idem with leading space
('pop ', [4, 0xE8BD0000], -3502), # > T35.0.2b > T35.2.0 : idem with a trailing space
('clz ', [4, 0xE1600010], -3502), # > T35.0.3b > T35.4.0 : idem for 'clz'
('clz 2', [4, 0xE1600010], -1302), # > T35.4.1a + override : unrecognizable register
('clz r', [4, 0xE1600010], -1303), # > T35.4.1a + override : missing register number
('clz r16', [4, 0xE1600010], -1304), # > T35.4.1a + override : too high reg number
('push 1', [4, 0xE92D0000], -1502), # > T35.2.1 + override : missing '{'
('pop {', [4, 0xE8BD0000], -1503), # + override : missing registers
('pushne {1', [4, 0x192D0000], -1302), # + override : unknown register identifier
('pophs {r', [4, 0x28BD0000], -1303), # + override : missing register number
('pushhi {ra', [4, 0x892D0000], -1304), # + override : wrong reg number
('poplo {r0', [4, 0x38BD0000], -1503), # + override : unclosed single register
('pushge {r14,}', [4, 0xA92D0000], -1504), # + override : missing register after ','
('popcc {r4-}', [4, 0x38BD0000], -1403), # + override : missing second reg in range list
('pushvs {r14, }', [4, 0x692D0000], -1504), # + override : missing register after ', '
('pusheq', [4, 0xE92D0000], -3502), # T35.0.2c > T35.1.1a error: missing operands
('popcce', [4, 0x38BD0000], -3504), # T35.0.2c > T35.1.1c error: wrong text after inst.
('popce', [4, 0xE8BD0000], -3504), # T35.0.2c > T35.1.2 error: wrong text after inst.
('pushle ', [4, 0xD92D0000], -3502), # > T35.1.1b > T35.2.0 error: missing operands
('clzh', [4, 0xE1600010], -3504), # T35.0.3c > T35.3.2 error: wrong text after inst.
('clzhi', [4, 0xE1600010], -3502), # T35.0.3c > T35.3.1a error: missing operands
('clzhi ', [4, 0x81600010], -3502), # > T35.3.1b > T35.4.0 err: missing operands
('clzhii', [4, 0x81600010], -3504), # T35.0.3c > T35.3.1c error: wrong text after inst.
('clzhs r15,', [4, 0x2160F010], -3502), # > T35.4.1b > T35.5.0 : missing operands
('clzhs r15 z,', [4, 0x21600010], -1304), # > T35.4.1a + override : wrong reg
('clzhs r15, ', [4, 0x2160F010], -3505), # > T35.4.1c > T35.5.2 : wrong info after Rd
('clzls r15,r6', [4, 0x9160F016], 1000), # > T35.4.1b > T35.5.1 : success 'clz' + cond
('pushls {r14}', [4, 0x992D4000], 1000), # > T35.1.1b > T35.2.1 : success 'push' + cond
('pop {r0, r4-r10, r14}', [4, 0xE8BD47F1], 1000) # > T35.2.1 : success 'pop'
]
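# --- illustrative sanity check (added sketch, not part of the original test data) ---
# 'push'/'pop' resolve to STMDB sp!/LDMIA sp! with the register-list mask in the
# low halfword, as the expected words above show.
def _push_word(cond, mask):
    return (cond << 28) | 0x092D0000 | mask
def _pop_word(cond, mask):
    return (cond << 28) | 0x08BD0000 | mask
assert _push_word(0x9, 0x4000) == 0x992D4000   # 'pushls {r14}'
assert _pop_word(0xE, 0x47F1) == 0xE8BD47F1    # 'pop {r0, r4-r10, r14}'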
data_arm = [('', [], -4001), # T40.0.0 error: missing initial hex address
('2', [], -4002), # T40.0.4 error: wrong initial address
('>', [], -4003), # T40.0.2a error: missing space after '>'
('>a', [], -4003), # T40.0.2c error: unexpected char after '>'
(' ', [], -4001), # T40.0.1 > T40.0.0 error: white leading space
('0x', [], -2002), # T40.0.3 + override : leading '0x', missing hex digits
(' 0x8001', [], -2003), # T40.0.1 > T40.0.3 + override : missing space after address
(' 0x8001 ', [0x8001], -4004), # T40.0.1 > T40.0.3 > T40.1.0 error: right address, missing info
('0x10002EF00 .byte 2', [], -2004), # T40.0.3 + override : long hex address (> 2^32)
('0x8000.f', [], -2003), # T40.0.3 + override : missing space after address
('0x8000 .f', [0x8000], -2104), # T40.0.3 > T40.1.1 + override : unknown data dir
('0x8024 .byte', [0x8024], -2102), # T40.0.3 > T40.1.1 + override : address & directive, missing val
('0x8000 .byte ', [0x8000], -2102), # T40.0.3 > T40.1.1 + override : missing data values
('0x8000 .byte2', [0x8000], -2103), # T40.0.3 > T40.1.1 + override : missing space after directive
('0x8024 .byte 23', [0x8024, [1, 23]], 1000), # T40.0.3 > T40.1.1 success: capture one byte
('> ', [0x8025], -4004), # T40.0.2b > T40.2.0 error: missing info after '>'
('> .byte 2', [0x8025, [1, 2]], 1000), # T40.0.2b > T40.2.1 success: .byte directive after '>'
('> .byte 3', [0x8026, [1, 3]], 1000), # T40.0.2b > T40.2.1 success: '>' after '>'
('> .byte 230', [0x8027, [1, 230]], 1000), # T40.0.2b > T40.2.1 success : '>' after .byte (1 value)
('0x802F .byte 23, 0xCB', [0x802F, [1, 23, 0xCB]], 1000), # T40.0.3 > T40.1.1 success: capture two bytes
('0x802F .byte \'e\' c', [0x802F], -2105), # T40.0.3 > T40.1.1 + override : wrong delimiter
('0x802F .byte \'e\', c', [0x802F], -2106), # T40.0.3 > T40.1.1 + override : unrecognizable info
('0x802F .byte 2000', [0x802F], -2107), # T40.0.3 > T40.1.1 + override : data >= 2**8
('0x901B .hword 2300, 0xCB0', [0x901B, [2, 2300, 0xCB0]], 1000), # T40.0.2b > T40.1.1 / misaligned h
(' > .hword 230', [0x9020, [2, 230]], 1000), # T40.0.2b > T40.2.1 '>' after .hword (2 values)
('0x901A .hword 2300, 0xCB0', [0x901A, [2, 2300, 0xCB0]], 1000), # T40.0.3 > T40.1.1 / aligned h
(' > .hword 320', [0x901E, [2, 320]], 1000), # T40.0.2b > T40.2.1 '>' after .hword (h aligned)
('0xCbf8 .word 230000, 0xCB000', [0xCBF8, [4, 230000, 0xCB000]], 1000), # T40.0.3 > T40.1.1 / aligned w
('0xCbf9 .word 230000, 0xCB000', [0xCBF9, [4, 230000, 0xCB000]], 1000), # / misaligned w (1)
('0xCbfa .word 230000, 0xCB000', [0xCBFA, [4, 230000, 0xCB000]], 1000), # / misaligned w (2)
('0xCbfb .word 230000, 0xCB000', [0xCBFB, [4, 230000, 0xCB000]], 1000), # / misaligned w (3)
('> .word 010', [0xCC04, [4, 8]], 1000), # T40.0.2b > T40.2.1 '>' after .word (2 values)
('0xa03c .ascii \'2\'', [0xA03C, [1, 50]], 1000), # T40.0.3 > T40.1.1 success: .ascii directive
('> .word 0x010', [0xA040, [4, 16]], 1000), # T40.0.2b > T40.2.1 '>' after .ascii (1 value)
('0xa03b .asciz \'2\', \"0xCB\"', [0xA03B, [1, 50, 0, 48, 120, 67, 66, 0]], 1000), # / two strings
('> .word 0b010', [0xA044, [4, 2]], 1000), # T40.0.2b > T40.2.1 '>' after .asciz (7 values)
('0xa03c .ascii \' ', [0xA03C], -1104), # T40.0.3 > T40.1.1 + override : unclosed char
('0xa03c .ascii \" ', [0xA03C], -1204), # : unclosed string
('0xa03c .asciz \' ', [0xA03C], -1104), # : unclosed char
('0xa03c .asciz \" ', [0xA03C], -1204), # : unclosed string
('0xa03c .ascii \'\'', [0xA03C], -1102), # : empty char
('0xa03c .ascii \"\"', [0xA03C], -1202), # : empty string
('0xa03c .asciz \'\'', [0xA03C], -1102), # : empty char
('0xa03c .asciz \"\"', [0xA03C], -1202), # : empty string
('0xc30a .ascii \'\t\'', [0xC30A], -1103), # : illegal character ''
('0xc30a .asciz \'\t\'', [0xC30A], -1103), # : idem after .ascii
('0xc30a .ascii \"\t\"', [0xC30A], -1203), # : illegal character ""
('0xc30a .asciz \" \t\"', [0xC30A], -1203), # : idem after valid char
('0x3000 .ascii \' t\'', [0x3000], -1105), # : more than one character
('0x3000 .asciz \' t\'', [0x3000], -1105), # : idem after .ascii
('0x1000 .byte 0b012', [0x1000], -1002), # : unexpected binary digit
('0x2000 .hword 0408', [0x2000], -1003), # : unexpected octal digit
('0x2000 .hword 4oo8', [0x2000], -1004), # : unexpected decimal digit
('0x2000 .hword 408000', [0x2000], -2107), # : out of range dec. number
('0x2000 .hword -48000', [0x2000], -2107), # : out of range neg. number
('0x4000 .word 0x40x', [0x4000], -1005), # : unexpected hexa digit
('0x4000 .word 0x400000000', [0x4000], -1006), # : too long num. (>2^32 bits)
('0x4000 .word 0x4, 0x', [0x4000], -1005), # : unexpected hexa digit
('0xfffffffc .ascii \'0\'', [0xFFFFFFFC, [1, 48]], 1000), # almost in the address space limit
('> .word 0b1', [0x100000000, [4, 1]], -4006), # T40.0.2b > T40.2.1 '>' after .asciz (7 values)
]
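# --- illustrative sanity check (added sketch, not part of the original test data) ---
# The '>' continuation address used above can be reproduced as: align the current
# address up to the directive size, then advance by size * count (the 2^32 overflow
# check that yields -4006 is not modelled here).
def _next_auto_address(addr, size, count):
    aligned = (addr + size - 1) & ~(size - 1)
    return aligned + size * count
assert _next_auto_address(0x901B, 2, 2) == 0x9020    # after '0x901B .hword 2300, 0xCB0'
assert _next_auto_address(0xCBFB, 4, 2) == 0xCC04    # after '0xCbfb .word 230000, 0xCB000'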
idat_arm = [('0x8000 2', [0x8000], -4005), # T40.0.3 > T40.1.7 error: unrecognizable instruction
('0x8004 and', [0x8004], -3102), # T40.0.3 > T40.1.2 + override : missing operands after instr.
('0x8008 eor ', [0x8008], -3102), # T40.0.3 > T40.1.2 + override : missing operands after instr.
('0x800C sub 20,', [0x800C], -1302), # : unrecognizable operand with ','
('0x8010 rsb r', [0x8010], -1303), # : missing register number
('0x8014 add r65', [0x8014], -1304), # : too high reg number
('0x8018 adc r12', [0x8018], -2302), # : good dest reg, missing other ops
('0x801C sbc ', [0x801C], -2303), # : missing dest reg
('0x8020 rsc r1,', [0x8020], -2304), # : missing source operands
('0x8024 orr r2, ', [0x8024], -2306), # : missing source operands
('0x8028 bic r3, gu', [0x8028], -2306), # : wrong source op 1
('0x802C and r12, r3, e3', [0x802C], -2308), # : wrong op 2
('0x8030 eor r3, #', [0x8030], -1603), # : missing value after '#'
('0x8034 sub r4, # ', [0x8034], -1604), # : unexpected space after '#'
('0x8038 rsb r5, #f', [0x8038], -1605), # : unrecognizable info after '#'
('0x803C add r10, #0x1002', [0x803C], -1606), # : impossible fixup for odd rotations
('0x8040 adc r11, #\'c\' 5', [0x8040], -1607), # : unexpected text after imm val.
('0x8044 sbc r10, r1,', [0x8044], -2204), # : missing shift register
('0x8048 rsc r7, r2, lsl', [0x8048], -2205), # : missing space after shift mode
('0x804C orr r9, r4, asr x', [0x804C], -2207), # : wrong info after shift mode
('0x8050 bic r0, r8, ror #', [0x8050], -1703), # : missing value after '#'
('0x8054 and r1, r9, lsl # ', [0x8054], -1704), # : unexpected space after '#'
('0x8058 eor r2, r10, lsr #f3', [0x8058], -1705), # : unrecognizable info after '#'
('0x805C sub r4, r12, ror #-2', [0x805C], -1706), # : negative number of shifts
('0x8060 orrs', [0x8060], -3102), # : missing data instruction operands
('0x8064 teqslo', [0x8064], -3105), # : wrong text after instruction
('0x8068 cmnlyy', [0x8068], -3104), # : unknown instruction condition
('0x8068 cmnls r0, #90', [0x8068, [4, 0x9370005A]], 1000), # T40.0.3 > T40.1.2 success: 1 reg, 1 imm.
('> rsbals r6, r11, #256', [0x806C, [4, 0xE27B6C01]], 1000), # T40.0.2b > T40.2.2 success: 2 regs, 1 imm.
('> addgt r12, r12, lsl r12', [0x8070, [4, 0xC08CCC1C]], 1000), # T40.0.2b > T40.2.2 : LSL reg
('0x8080 adcs r1, r2, lsr r0 ', [0x8080, [4, 0xE0B11032]], 1000), # T40.0.3 > T40.1.2 : LSR reg with space
('> rscles pc, lr, lsr #0x1F ', [0x8084, [4, 0xD0FFFFAE]], 1000), # 40.0.2b > T40.2.2 : LSR imm with space
('0x8088 bicmis r10, r11, r12, lsl r12', [0x8088, [4, 0x41DBAC1C]], 1000), # : three regs, shift reg
('0x8088 bicmis r0, r1, r2, lsl #0', [0x8088, [4, 0x41D10002]], 1000), # : three regs, LSL #0
('0x8088 bicmis r0, r1, r2, ror #0', [0x8088, [4, 0x41D10062]], 1000), # : three regs, ROR #0 -> RRX
('> tst r7,r1, #01010', [0x808C], -2310), # > T40.2.2 + override : 3 ops with 'tst'
('> movvc r1,r9, #0xC000', [0x808C], -2311), # > T40.2.2 + override : 3 ops with 'mov'
('> tst r7, #01010', [0x808C, [4, 0xE3170F82]], 1000), # T40.0.2b > T40.2.2 : 'tst' + reg + imm
('> teqlts r7,r8,lsl #12', [0x8090, [4, 0xB1370608]], 1000), # T40.0.2b > T40.2.2 : 'teq'+reg+shifted reg
('> mov r2, #-100', [0x8094, [4, 0xE3E02063]], 1000), # T40.0.2b > T40.2.2 : 'mov' + reg + NOT imm
('> and r4, #-250', [0x8098, [4, 0xE3C440F9]], 1000), # T40.0.2b > T40.2.2 : 'and' + reg + NOT imm
('> add r6, #-3120', [0x809C, [4, 0xE2466EC3]], 1000), # T40.0.2b > T40.2.2 : 'add' + reg + NOT imm
('0xA0008 cmp r8, #-1004', [0xA0008, [4, 0xE3780FFB]], 1000), # T40.0.3 > T40.1.2 : 'cmp' + reg + NOT imm
('> .byte -1', [0xA000C, [1, 255]], 1000), # T40.0.2b > T40.2.1 : automatic inc. +1
('> bics r5, #-255', [0xA0010, [4, 0xE21550FE]], 1000), # T40.0.2b > T40.2.2 : adjust adr. 3 bytes
('> .hword -2', [0xA0014, [2, 65534]], 1000), # T40.0.2b > T40.2.1 : automatic inc. +2
('> movvss r9,#0xC0000', [0xA0018, [4, 0x63B09703]], 1000), # T40.0.2b > T40.2.2 : adjust adr. 2 bytes
(' > .byte -1, -2, -3', [0xA001C, [1, 255, 254, 253]], 1000), # T40.0.2b > T40.2.1 : automatic inc. +3
(' > cmnne r5, #-256', [0xA0020, [4, 0x13550C01]], 1000), # T40.0.2b > T40.2.2 : adjust adr. 1 byte
('> r5, #-256', [0xA0024], -4005), # T40.0.2b > T40.2.7 : unrecognized inst.
('0xA0025 cmp r9, #1004', [0xA0025, [4, 0xE3590FFB]], 1000), # warning : address misaligned 1 byte
('0xA0026 cmp r10, #1008', [0xA0026, [4, 0xE35A0E3F]], 1000), # warning : address misaligned 2 bytes
(' 0xA0027 cmp r11, #1012', [0xA0027, [4, 0xE35B0FFD]], 1000), # warning : address misaligned 3 bytes
('0x8068 .word -4', [0x8068, [4, 4294967292]], 1000) # final test: set auto-address as before the first
# test in this series that makes use of '>'
]
imul_arm = [('0x7FFC .word -4', [0x7FFC, [4, 4294967292]], 1000), # set auto-address as before the first use of '>'
('> ', [0x8000], -4005), # T40.0.2b > T40.1.7 error: unrecognizable instruction
('> 2', [0x8000], -4005), # T40.0.2b > T40.1.7 error: unrecognizable instruction
('> mul', [0x8000], -3202), # T40.0.2b > T40.2.3 + override : missing operands after instr.
('> mla ', [0x8000], -3202), # T40.0.2b > T40.2.3 + override : missing operands after instr.
('> umull 2', [0x8000], -1302), # : wrong register
('> umull 2,', [0x8000], -1302), # : wrong register with ','
('> umull r', [0x8000], -1303), # : missing register number
('> smull r65', [0x8000], -1304), # : too high reg number
('> umlal r12', [0x8000], -3202), # : missing other regs
('> mul ', [0x8000], -1301), # : missing other regs
('0x90FC mul r1,', [0x90FC], -3202), # : missing source operands
('> mla r2, ', [0x8000], -1301), # : missing source operands
('> smlal r3, gu', [0x8000], -1302), # : wrong reg2
('> umlal r12, r3, e3', [0x8000], -1302), # : wrong reg3
('> mul r3, r4, r5, r6', [0x8000], -3207), # : four registers with 'mul'
('> smlal r3, r4, r5, ', [0x8000], -1301), # : missing reg4
('> mla r3, r4, r5', [0x8000], -3202), # : three regs with 'mla'
('> mul r1, r10, r8', [0x8000, [4, 0xE001089A]], 1000), # success: three regs with 'mul'
('0xA000 mla r13, r14, r0, r0', [0xA000, [4, 0xE02D009E]], 1000), # success: four regs with 'mla'
('> umull sp, lr, r12, r13', [0xA004, [4, 0xE08EDD9C]], 1000), # success: four regs with 'umull'
('> mul r10, pc, r7', [0xA008], -3208), # + override: use of PC as Rm
('> smulllex r10, r11, lr, r10', [0xA008], -3205), # + override: error after cond
('> mulz', [0xA008], -3204) # + override: wrong text after
]
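# --- illustrative sanity check (added sketch, not part of the original test data) ---
# MUL/MLA words from the tuples above: Rd at bits 16-19, Rs at 8-11, Rm at 0-3 with
# the 1001 marker at bits 4-7; MLA additionally sets bit 21 and places Rn at 12-15
# (the long multiplies UMULL/SMULL use a different RdLo/RdHi layout, not shown).
def _mul_word(cond, rd, rm, rs, rn=None):
    w = (cond << 28) | (rd << 16) | (rs << 8) | 0x90 | rm
    if rn is not None:
        w |= (1 << 21) | (rn << 12)
    return w
assert _mul_word(0xE, 1, 10, 8) == 0xE001089A            # 'mul r1, r10, r8'
assert _mul_word(0xE, 13, 14, 0, rn=0) == 0xE02D009E     # 'mla r13, r14, r0, r0'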
ijmp_arm = [('0x7FFC .word -4', [0x7FFC, [4, 4294967292]], 1000), # set auto-address as before the first use of '>'
('> blo', [0x8000], -3302), # T40.0.2b > T40.2.4 + override: missing offset
('0x9004 bleq ', [0x9004], -3302), # T40.0.3 > T40.1.4 + override : missing offset
('> blox', [0x8000], -4005), # T40.0.2b > T40.2.4 + override: unexpected text after inst
('0xA0000 bx', [0xA0000], -3304), # T40.0.3 > T40.1.4 + override : missing reg after instr.
('> blxo', [0x8000], -4005), # T40.0.2b > T40.2.4 + override: unexpected text after inst
('0x10 blt f', [0x10], -3305), # T40.0.3 > T40.1.4 + override : wrong offset
('> bls 0b12', [0x8000], -1002), # T40.0.3 > T40.1.4 + override : unexpected binary digit
('> blls 0192', [0x8000], -1003), # : unexpected octal digit
('> bllo -192a', [0x8000], -1004), # : unexpected decimal digit
('> blvc 0xA3G0', [0x8000], -1005), # : unexpected hexa digit
('> bvc 0xA30000000', [0x8000], -1006), # : too long hex address
('> bxvc 0xA300', [0x8000], -1302), # : unrecognized reg
('> blxcc r', [0x8000], -1303), # : missing reg number
('> bxcc rf', [0x8000], -1304), # : wrong reg number
('> bxmi r16', [0x8000], -1304), # : wrong reg number
('> blgt 0x1302', [0x8000], -3307), # : misaligned address
('> bllt 0x73000000', [0x8000], -3308), # : out of range offset
('> blal -73000000', [0x8000], -3308), # : out of range neg. offset
('> bal -7300001', [0x8000], -3307), # : misaligned negative address
('> bx r6 ', [0x8000, [4, 0xE12FFF16]], 1000), # T40.0.2b > T40.2.4 success: 'bx' jump
('> blxpl r6', [0x8004, [4, 0x512FFF36]], 1000), # : 'blx' jump
('0x7A0C blxlt r15', [0x7A0C, [4, 0xB12FFF3F]], 1000), # > T40.1.4 warning: use of pc (r15)
('> b 0xA300', [0x7A10, [4, 0xEA000A3A]], 1000), # > T40.2.4 success: 'b' jump
('0xFFF8 bl 1300', [0xFFF8, [4, 0xEBFFC145]], 1000), # > T40.1.4 success: 'bl' negative jump
('> blt 073000000', [0xFFFC, [4, 0xBA3ABFFF]], 1000), # > T40.2.4 success: 'blt' octal jump
('> bleq 0x730000', [0x10000, [4, 0x0B1C7FFE]], 1000), # > T40.2.4 success: 'bleq' hexa jump
('0x7FF8 bhi 0xA30000', [0x7FF8, [4, 0x8A28A000]], 1000), # > T40.1.4 success: 'bhi' jump
('> bge 0x2008000', [0x7FFC, [4, 0xAA7FFFFF]], 1000), # : forward jump limit
('0x2000000 blhs 0x8', [0x2000000, [4, 0x2B800000]], 1000), # : backward jump limit
('0x400000 blhs 0xC', [0x400000, [4, 0x2BF00001]], 1000), # : another backward jump
('0x4000 blhi 0x4000', [0x4000, [4, 0x8BFFFFFE]], 1000), # : jump onto same address
('0x4000 blhi 0x4008', [0x4000, [4, 0x8B000000]], 1000), # : jump onto advanced pc
('0x4001 blhi 0x4008', [0x4001, [4, 0x8BFFFFFF]], 1000) # : jump from misaligned adr.
]
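# --- illustrative sanity check (added sketch, not part of the original test data) ---
# B/BL offsets above are signed word offsets relative to pc + 8, truncated to
# 24 bits; the alignment and range checks (-3307/-3308) are not modelled here.
def _branch_word(cond, link, pc, target):
    off = ((target - (pc + 8)) >> 2) & 0xFFFFFF
    return (cond << 28) | ((0xB if link else 0xA) << 24) | off
assert _branch_word(0xE, True, 0xFFF8, 1300) == 0xEBFFC145     # '0xFFF8 bl 1300'
assert _branch_word(0xE, False, 0x7A10, 0xA300) == 0xEA000A3A  # '> b 0xA300'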
imem_arm = [('0x7FFC .word -4', [0x7FFC, [4, 4294967292]], 1000), # set auto-address as before the first use of '>'
('> ld', [0x8000], -4005), # T40.0.2b > T40.2.5 + override: missing inst. continuation
('> st ', [0x8000], -4005), # + override: missing inst. continuation
('> str', [0x8000], -3403), # + override: missing space after inst.
('> ldr ', [0x8000], -3405), # + override: missing destination register
('> sts', [0x8000], -3408), # + override: 's' not allowed for store inst.
('> ldx', [0x8000], -4005), # + override: unrecognized mem. transfer inst.
('> ldrby', [0x8000], -3404), # + override: wrong text after inst.
('> ldrb e', [0x8000], -1302), # + override: unknown reg
('> str r', [0x8000], -1303), # + override: missing reg number
('> ldr rb', [0x8000], -1304), # + override: wrong reg number
('> ldrb r1', [0x8000], -3406), # + override: missing ',' after dest. reg
('> strb r2,', [0x8000], -3407), # + override: missing info after dest. reg
('> ldrhsb r2, 2', [0x8000], -2402), # + override: missing '['
('> strvcb r3, [', [0x8000], -2403), # + override: missing info after '['
('> ldrge r4, [2', [0x8000], -2403), # + override: unrecognizable register
('> strltb r5,[r', [0x8000], -1303), # + override: missing register number
('> ldrvc r6, [r16', [0x8000], -1304), # + override: too high reg number
('> ldr lr, [r12', [0x8000], -2404), # + override: good base reg, missing closure
('> ldrb r15, [r1,', [0x8000], -2405), # + override: missing displacement
('> strb pc, [r2]!', [0x8000], -2410), # + override: unexpected text after ']'
('> ldrvsb r4,[r3, 3', [0x8000], -2406), # + override: wrong displacement
('> ldrltb r6, [r5, r10, ', [0x8000], -2407), # + override: missing shift mode
('> strlsb r7, [r7, r2, lsl', [0x8000], -2408), # + override: missing space after shift
('> ldr r11, [r10, r5, ror r', [0x8000], -1702), # + override: missing info after shift mode
('> ldrb r12, [r1, r9, lsl # ', [0x8000], -1704), # + override: unexpected space after '#'
('> strb r13,[r9,#0xC0000034]', [0x8000], -2411), # + override: too long immediate displacement
('> ldr r0, [r12, #0b1002000]', [0x8000], -1002), # + override: invalid binary digit
('> strhi r1, [r13, #018000005]', [0x8000], -1003), # + override: invalid octal digit
('> strlob r2, [r14, #5d4]', [0x8000], -1004), # + override: invalid decimal digit
('> ldrplb r3, [r15, #0x4r]', [0x8000], -1005), # + override: invalid hexa digit
('> ldrb r3, [r15, #0x400000000]', [0x8000], -1006), # + override: too big number
('> ldrcsb r4, [ r6, #+0]', [0x8000, [4, 0x25D64000]], 1000), # success: base + imm. displ.
('> ldr r5, [r6, #20]', [0x8004, [4, 0xE5965014]], 1000), # success: base + imm. displ.
('> str r6,[r7, #+4095]', [0x8008, [4, 0xE5876FFF]], 1000), # success: maximum positive imm. displ.
('> ldreqb r7, [r8, #-20]', [0x800C, [4, 0x05587014]], 1000), # success: base + negative imm. displ.
('> strccb r8, [r9, #-4095] ', [0x8010, [4, 0x35498FFF]], 1000), # : minimum negative imm. displ.
('> ldr r9, [r10]', [0x8014, [4, 0xE59A9000]], 1000), # : base only
('> str r10,[r9,+r1]', [0x8018, [4, 0xE789A001]], 1000), # : base + reg. displacement
('> str r10, [r5, r15]', [0x801C], -2412), # + override: PC not allowed as Rm
('> strb r11, [r0, r8, ror #]', [0x801C], -1703), # + override: missing value after '#'
('> ldrle r12, [r2, r10, lsr #f]', [0x801C], -1705), # + override: unrecogn. info after '#'
('> strmib r13, [r4, r12, ror #-20]', [0x801C], -1706), # + override: negative number of shifts
('> ldrplb r14, [r5, r13, lsl #040]', [0x801C], -1706), # + override: too high number of shifts
('> ldrvs r15,[r6, lr, lsr #0x1C] ', [0x801C, [4, 0x6796FE2E]], 1000), # success: with trailing space
('> str r0, [r5, r13, lsl #00]', [0x8020, [4, 0xE785000D]], 1000), # success: true LSL #0
('0x904A ldr r1, [r6, sp, lsr #0x0 ]', [0x904A, [4, 0xE796100D]], 1000), # : converting LSR #0 into LSL #0
('> str r2, [r7,-r1,asr #0b10101]', [0x9050, [4, 0xE7072AC1]], 1000), # : ASR bin imm, no space after ','
('0x8090 ldr r3 ,[r7,+r1,asr #0b0]', [0x8090, [4, 0xE7973001]], 1000), # : converting ASR #0 into LSL #0
('> ldrb r4,[r9, r12, ror #0x1F]', [0x8094, [4, 0xE7D94FEC]], 1000), # : success ROR with 31 shifts
('> strb r5, [r9, r12, ror #0x0]', [0x8098, [4, 0xE7C9506C]], 1000), # : coding ROR #0 as RRX
('> lds', [0x809C], -3404), # + override: wrong memory transfer inst.
('> strz', [0x809C], -3404), # + override: wrong memory transfer inst.
('> strs', [0x809C], -3408), # + override: 's' not allowed for store inst.
('> ldrsb e', [0x809C], -1302), # + override: wrong text after inst.
('> strleh r10, r12', [0x809C], -2502), # + override: missing '['
('> strlsh r10, [12', [0x809C], -2503), # + override: missing reg after '['
('> strloh r8, [r12', [0x809C], -2504), # + override: missing closure
('> streqh r9, [r1,', [0x809C], -2505), # + override: missing displacement
('> ldsccb r1,[r2]!', [0x809C], -2510), # + override: unexpected text after ']'
('> strh r2, [r3, 3', [0x809C], -2506), # + override: wrong displacement
('> strplh r9, [r5, r10, ', [0x809C], -2513), # + override: scaled reg. displ. not allowed
('> ldsmib r10, [r9, #0x134]', [0x809C], -2511), # + override: too long immediate displacement
('> ldsb r9, [r5, r15]', [0x809C], -2512), # + override: PC not allowed as Rm
('> ldrgtsb r11 , [ r6, #+0]', [0x809C, [4, 0xC1D6B0D0]], 1000), # success: base + imm. displ.
('0x20030 strh r12, [r6 ,#195]', [0x20030, [4, 0xE1C6CCB3]], 1000), # success: base + imm. displ.
('0x2000 ldrlsh r3, [r10, #-180]', [0x2000, [4, 0x915A3BB4]], 1000), # : base + negative imm. displ.
('> stmz', [0x2004], -3404), # + override: wrong memory transfer inst.
('> ldmia', [0x2004], -3403), # + override: missing space after inst.
('> stmdb ', [0x2004], -3405), # + override: missing destination reg
('> ldmhsfd r2', [0x2004], -3406), # + override: missing ',' after destination reg
('> ldmhsfa r2,', [0x2004], -3407), # + override: missing info after dest. reg
('> stmccib r3,1', [0x2004], -1502), # + override: missing '{'
('> ldmmied r4!, {', [0x2004], -1503), # + override: missing registers
('> stmed r9, {r14,}', [0x2004], -1504), # + override: missing register after ','
('> ldmfd r13!, {r4-}', [0x2004], -1403), # + override: missing second reg in range list
('0x70FC ldmalib r11 , {r0-r5}', [0x70FC, [4, 0xE99B003F]], 1000), # success: single range
('> stmccdb r12!, {pc, r1-r2, sp-r12, r5}', [0x7100, [4, 0x392CB026]], 1000), # : several ranges, with spcs
('> str r0, =', [0x7104], -3409), # + override: 'str' cannot use '=' loading
('> ldrh r0,=', [0x7104], -2502), # + override: nor 'ldrh'
('> ldr r0, =t', [0x7104], -3410), # + override: idem with trailing rubbish
('> ldr r5, =0x100000000', [0x7104], -1006), # + override: too big number
('> ldr r6, =+0', [0x8104, [4, 0], 0x7104, [4, 0xE59F6FF8]], 1000), # success: set a relative pc loading
('> ldrhi r7, = 00317652', [0x8108, [4, 0x19FAA], 0x7108, [4, 0x859F7FF8]], 1000), # : octal number
('0x801C ldrlt lr, =-1000', [0x901C, [4, 0xFFFFFC18], 0x801C, [4, 0xB59FEFF8]], 1000), # : negative number
('> ldr pc, = 0x8000', [0x9020, [4, 0x8000], 0x8020, [4, 0xE59FFFF8]], 1000), # : hexa num. (load PC)
('0x801A ldrgt lr, =0x1FF80', [0x901A, [4, 0x1FF80], 0x801A, [4, 0xC59FEFF8]], 1000), # : explicit misalign
('> ldr sp , =0x80000', [0x9020, [4, 0x80000], 0x8020, [4, 0xE59FDFF8]], 1000), # : implicit misalign
('0xfffffffc .ascii \'1\'', [0xFFFFFFFC, [1, 49]], 1000), # almost in the address space limit
('> ldr r0, =8', [0x100001000, [4, 8], 0x100000000, [4, 0xE59F0FF8]], -4006), # crossing addr. space limit
('0xffffeffc .ascii \'2\'', [0xFFFFEFFC, [1, 50]], 1000), # almost in the address space limit
('> ldr r2,=-8', [0x100000000, [4, 0xFFFFFFF8], 0xFFFFF000, [4, 0xE59F2FF8]], -4006) # crossing addr. limit
]
imsc_arm = [('0x7FFC .word -4', [0x7FFC, [4, 4294967292]], 1000), # set auto-address as before the first use of '>'
('> push', [0x8000], -3502), # T40.0.2b > T40.2.6 + override : missing operands
('0x8000 clz 2', [0x8000], -1302), # T40.0.3 > T40.1.6 + override : unrecognizable register
('> clz r', [0x8000], -1303), # + override : missing register number
('> clz r16', [0x8000], -1304), # + override : too high reg number
('> push 1', [0x8000], -1502), # + override : missing '{'
('> pop {', [0x8000], -1503), # + override : missing registers
('> pushge {r14,}', [0x8000], -1504), # + override : missing register after ','
('> popcc {r4-}', [0x8000], -1403), # + override : missing second reg in range list
('0x9004 popcce', [0x9004], -3504), # + override : wrong text after inst.
('> clzhs r15, ', [0x8000], -3505), # + override : wrong info after Rd
('> clzls r15,r6', [0x8000, [4, 0x9160F016]], 1000), # success : 'clz' + cond
('0xA00 pushls {r14}', [0xA00, [4, 0x992D4000]], 1000), # success : 'push' + cond
('> pop {r0, r4-r10, r14}', [0xA04, [4, 0xE8BD47F1]], 1000) # success : 'pop'
]
test_groups = [(number_analyzer, hex_test, 'hexadecimal numbers'),
(number_analyzer, dec_test, 'decimal numbers'),
(number_analyzer, oct_test, 'octal numbers'),
(number_analyzer, bin_test, 'binary numbers'),
(char_analyzer, chr_test, 'single quoted chars'),
(string_analyzer, str_test, 'double quoted strings'),
(data_analyzer, dat_test, 'data directives'),
(address_analyzer, adr_test, 'hex addresses'),
(register_analyzer, reg_test, 'register identifiers'),
(regbit_analyzer, rbt_test, 'registers bit mask'),
(reglst_analyzer, rlt_test, 'registers list mask'),
(immediate_op_analyzer, imo_test, 'immediate operand'),
(immediate_sr_analyzer, ims_test, 'immediate shift register'),
(op2_analyzer, op2_test, 'second operand'),
(opdat_analyzer, opd_test, 'data instruction operands'),
(instdat_analyzer, idt_test, 'data instructions'),
(instmul_analyzer, iml_test, 'multiplication instructions'),
(instjmp_analyzer, ibr_test, 'branch instructions'),
(opldst2_analyzer, am2_test, 'addressing mode 2'),
(opldst3_analyzer, am3_test, 'addressing mode 3'),
(instmem_analyzer, im2_test, 'memory transfer instructions, addressing mode 2'),
(instmem_analyzer, im3_test, 'memory transfer instructions, addressing mode 3'),
(instmem_analyzer, imm_test, 'memory transfer instructions, multiple registers'),
(instmem_analyzer, iil_test, 'memory transfer instructions, immediate load'),
(instmsc_analyzer, imi_test, 'miscellanea instructions'),
(arm_analyzer, data_arm, 'arm data directives'),
(arm_analyzer, idat_arm, 'arm data instructions'),
(arm_analyzer, imul_arm, 'arm multiplication instructions'),
(arm_analyzer, ijmp_arm, 'arm branch instructions'),
(arm_analyzer, imem_arm, 'arm memory transfer instructions'),
(arm_analyzer, imsc_arm, 'arm miscellanea instructions')
]
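# --- illustrative harness sketch (added, not part of the original file) ---
# One way the (source, expected_result, expected_code) tuples above could be
# consumed; the analyzer call signature is an assumption here and may need
# adapting to how the real analyzers in this module take and return their state.
def run_test_groups(groups):
    for analyzer, tests, label in groups:
        passed = 0
        for source, expected_result, expected_code in tests:
            result, code = analyzer(source)    # assumed signature: returns (result, code)
            if result == expected_result and code == expected_code:
                passed += 1
            else:
                print('FAIL %s: %r -> %r, %d (expected %r, %d)'
                      % (label, source, result, code, expected_result, expected_code))
        print('%s: %d/%d passed' % (label, passed, len(tests)))
# run_test_groups(test_groups)  # enable once the signature matches the real analyzers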
docs/source/conf.py | Gemicai/Gemicai | 2,275 bytes | MIT
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
# -- Project information -----------------------------------------------------
project = 'Gemicai'
copyright = '2020, Kevin Alberts, Niek Heinen, Mateusz Jaworski, Sieta de Jong'
author = 'Kevin Alberts, Niek Heinen, Mateusz Jaworski, Sieta de Jong'
# The full version, including alpha/beta/rc tags
release = '0.5.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
import sphinx_glpi_theme
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'glpi'
html_theme_path = sphinx_glpi_theme.get_html_themes_path()
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
testproject/testproject/settings.py | io-ma/django-groups-manager | 3,508 bytes | MIT
"""
Django settings for testproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import django
try:
import guardian
has_guardian = True
except ImportError:
has_guardian = False
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9vg3q-kbo(p^zpom4!*o8*%tfu-14o=3++txo+sxwto)2@=qd='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
# Uncomment for testing templates, and after a `pip install django-bootstrap3`
# 'bootstrap3',
# App test
'groups_manager',
'testproject',
)
if has_guardian:
INSTALLED_APPS += ('guardian',)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
MIDDLEWARE_CLASSES = MIDDLEWARE
# django-guardian required settings
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'guardian.backends.ObjectPermissionBackend',
)
ANONYMOUS_USER_ID = -1
ROOT_URLCONF = 'testproject.urls'
WSGI_APPLICATION = 'testproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
SESSION_COOKIE_NAME = "testproject"
LOGIN_URL = '/admin/login/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
]
},
},
]
# Uncomment for testing application settings
"""
GROUPS_MANAGER = {
'AUTH_MODELS_SYNC': True,
'AUTH_MODELS_GET_OR_CREATE': False,
'GROUP_NAME_PREFIX': '',
'GROUP_NAME_SUFFIX': '',
'USER_USERNAME_PREFIX': '',
'USER_USERNAME_SUFFIX': '',
'PERMISSIONS': {
'owner': ['view', 'change', 'delete'],
'group': ['view', 'change'],
'groups_upstream': ['view'],
'groups_downstream': [],
'groups_siblings': ['view'],
},
}
"""
dataset/dataset_inspect.py | Lsplastic/Tensorflow_ssd | 1,609 bytes | Apache-2.0
# Copyright 2018 Changan Wang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
def count_split_examples(split_path, file_prefix='.tfrecord'):
    # Count the total number of examples in all of these shards
num_samples = 0
tfrecords_to_count = tf.gfile.Glob(os.path.join(split_path, file_prefix))
opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
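    # 'opts' is only needed when the shards were written ZLIB-compressed; in that
    # case pass it along (see the commented-out 'options' argument below).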
for tfrecord_file in tfrecords_to_count:
for record in tf.python_io.tf_record_iterator(tfrecord_file):#, options = opts):
num_samples += 1
return num_samples
if __name__ == '__main__':
print('train:', count_split_examples('/media/rs/7A0EE8880EE83EAF/Detections/SSD/dataset/tfrecords', 'train-?????-of-?????'))
print('val:', count_split_examples('/media/rs/7A0EE8880EE83EAF/Detections/SSD/dataset/tfrecords', 'val-?????-of-?????'))
1c4a4c7893d539bc2653916b6c930594e9f82081 | 466 | py | Python | data/scripts/templates/object/tangible/ship/attachment/booster/shared_ywing_booster_s01.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | ["MIT"] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/ship/attachment/booster/shared_ywing_booster_s01.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | ["MIT"] | null | null | null | data/scripts/templates/object/tangible/ship/attachment/booster/shared_ywing_booster_s01.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | ["MIT"] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/attachment/booster/shared_ywing_booster_s01.iff"
result.attribute_template_id = 8
result.stfName("item_n","ship_attachment")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
    return result
| 27.411765 | 89 | 0.736052 |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/attachment/booster/shared_ywing_booster_s01.iff"
result.attribute_template_id = 8
result.stfName("item_n","ship_attachment")
    return result
| true | true |
1c4a4ca689afea9e81b98745e6c04f99db6d9b09 | 148 | py | Python | app/rockband/apps.py | solattila/rock-band-api | 1521b2913b75c53310ba1b71d77d599966237483 | ["MIT"] | null | null | null | app/rockband/apps.py | solattila/rock-band-api | 1521b2913b75c53310ba1b71d77d599966237483 | ["MIT"] | null | null | null | app/rockband/apps.py | solattila/rock-band-api | 1521b2913b75c53310ba1b71d77d599966237483 | ["MIT"] | null | null | null |
from django.apps import AppConfig
class RockbandConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'rockband'
| 21.142857 | 56 | 0.763514 |
from django.apps import AppConfig
class RockbandConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'rockband'
| true | true |
1c4a4df4c0837afb763b5667c2f4dc1f6cf6ab1e | 1,129 | py | Python | python/meas_smag.py | dkkim1005/Neural_Network_Quantum_State | 7e94929c5ef65ce87f63bf20c81acaa524adca82 | ["Unlicense"] | null | null | null | python/meas_smag.py | dkkim1005/Neural_Network_Quantum_State | 7e94929c5ef65ce87f63bf20c81acaa524adca82 | ["Unlicense"] | null | null | null | python/meas_smag.py | dkkim1005/Neural_Network_Quantum_State | 7e94929c5ef65ce87f63bf20c81acaa524adca82 | ["Unlicense"] | 1 | 2022-01-26T05:13:38.000Z | 2022-01-26T05:13:38.000Z |
#!/usr/bin/env python3
import numpy as np
from pynqs import sampler
floatType = 'float32'
symmType = 'tr'
# hyper parameter sets of rbm and MCMC sampler
kwargs = {
'nInputs' : 16,
'nHiddens' : 4,
'nChains' : 1000,
'seedNumber' : 0,
'seedDistance' : 123456789,
'init_mcmc_steps' : 300
}
# transverse-field strength
hfield = '-1.1'
# template for the path of the file that stores the trained network
filepath = './temp/build/RBMTrSymmCH-N%dA%dH%sV1'\
%(kwargs['nInputs'], kwargs['nHiddens'], hfield)
kwargs['path_to_load'] = filepath
# total number of measurements
nmeas = 1000
# number of Monte-Carlo steps
nms = 20
# range of the error bar (95% confidence)
Z = 2
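# (The error bar computed below is Z * std(mag) / sqrt(nmeas), i.e. the
#  standard error of the mean; Z = 2 widens it to an approximate 95%
#  confidence range, matching the mag_err line further down.)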
rbm = sampler.RBM(floatType = floatType, symmType = symmType)
rbm.init(**kwargs)
mag = np.zeros([nmeas], dtype = floatType)
for i in range(nmeas):
print ('# of measurements: %d'%i, end = '\r')
rbm.do_mcmc_steps(nms)
spinStates = rbm.get_spinStates()
mag[i] = np.mean(np.abs(np.mean(spinStates, axis = 1)))
mag_mean = np.mean(mag)
mag_err = Z*np.sqrt(np.sum((mag - mag_mean)**2)/(nmeas*(nmeas-1)))
print ('<|m|> : %.5E'%mag_mean, ' +/- %.3E'%mag_err)
| 27.536585 | 66 | 0.662533 |
import numpy as np
from pynqs import sampler
floatType = 'float32'
symmType = 'tr'
kwargs = {
'nInputs' : 16,
'nHiddens' : 4,
'nChains' : 1000,
'seedNumber' : 0,
'seedDistance' : 123456789,
'init_mcmc_steps' : 300
}
hfield = '-1.1'
filepath = './temp/build/RBMTrSymmCH-N%dA%dH%sV1'\
%(kwargs['nInputs'], kwargs['nHiddens'], hfield)
kwargs['path_to_load'] = filepath
nmeas = 1000
nms = 20
Z = 2
rbm = sampler.RBM(floatType = floatType, symmType = symmType)
rbm.init(**kwargs)
mag = np.zeros([nmeas], dtype = floatType)
for i in range(nmeas):
print ('# of measurements: %d'%i, end = '\r')
rbm.do_mcmc_steps(nms)
spinStates = rbm.get_spinStates()
mag[i] = np.mean(np.abs(np.mean(spinStates, axis = 1)))
mag_mean = np.mean(mag)
mag_err = Z*np.sqrt(np.sum((mag - mag_mean)**2)/(nmeas*(nmeas-1)))
print ('<|m|> : %.5E'%mag_mean, ' +/- %.3E'%mag_err)
| true | true |
1c4a4f10c49f9126358b074d166c4bcbaae00b6b | 6,641 | py | Python | pysph/sph/tests/test_linalg.py | nauaneed/pysph | 9cb9a859934939307c65a25cbf73e4ecc83fea4a | ["BSD-3-Clause"] | 293 | 2017-05-26T14:41:15.000Z | 2022-03-28T09:56:16.000Z | pysph/sph/tests/test_linalg.py | nauaneed/pysph | 9cb9a859934939307c65a25cbf73e4ecc83fea4a | ["BSD-3-Clause"] | 217 | 2017-05-29T15:48:14.000Z | 2022-03-24T16:16:55.000Z | pysph/sph/tests/test_linalg.py | nauaneed/pysph | 9cb9a859934939307c65a25cbf73e4ecc83fea4a | ["BSD-3-Clause"] | 126 | 2017-05-25T19:17:32.000Z | 2022-03-25T11:23:24.000Z |
from pysph.sph.wc.linalg import (
augmented_matrix, gj_solve, mat_mult, mat_vec_mult
)
import numpy as np
import unittest
def gj_solve_helper(a, b, n):
m = np.zeros((n, n+1)).ravel().tolist()
augmented_matrix(a, b, n, 1, n, m)
result = [0.0]*n
is_singular = gj_solve(m, n, 1, result)
return is_singular, result
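# A quick sanity check of the helper (a sketch, not one of the test cases):
# for the diagonal system diag(2, 4) x = [2, 8] it should return
# is_singular == 0.0 and result == [1.0, 2.0]:
#     sing, x = gj_solve_helper([2.0, 0.0, 0.0, 4.0], [2.0, 8.0], 2)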
class TestLinalg(unittest.TestCase):
def _to_array(self, x, shape=None):
x = np.asarray(x)
if shape:
x.shape = shape
return x
def test_augmented_matrix(self):
# Given
a = np.random.random((3, 3))
b = np.random.random((3, 2))
res = np.zeros((3, 5)).ravel().tolist()
expect = np.zeros((3, 5))
expect[:, :3] = a
expect[:, 3:] = b
# When
augmented_matrix(a.ravel(), b.ravel(), 3, 2, 3, res)
res = self._to_array(res, (3, 5))
# Then
np.testing.assert_array_almost_equal(res, expect)
def test_augmented_matrix_with_lower_dimension(self):
# Given
a = np.random.random((3, 3))
b = np.random.random((3, 2))
res = np.zeros((3, 5)).ravel().tolist()
expect = np.zeros((2, 4))
expect[:, :2] = a[:2, :2]
expect[:, 2:] = b[:2, :]
expect.resize((3, 5), refcheck=False)
# When
augmented_matrix(a.ravel(), b.ravel(), 2, 2, 3, res)
res = self._to_array(res, (3, 5))
# Then
np.testing.assert_array_almost_equal(res, expect)
def test_augmented_matrix_with_gjsolve_with_lower_dimension(self):
# Given
nmax = 3
mat = np.array([[7., 4., 2.], [8., 9., 4.], [1., 4., 10.]])
b = np.array([5., 4., 2.])
expect = np.linalg.solve(mat[:2, :2], b[:2])
augmat = np.zeros((3, 4)).ravel().tolist()
res = np.zeros(2).ravel().tolist()
# When
augmented_matrix(mat.ravel(), b.ravel(), 2, 1, nmax, augmat)
gj_solve(augmat, 2, 1, res)
# Then
np.testing.assert_array_almost_equal(res, expect)
def test_general_matrix(self):
# Test Gauss Jordan solve.
"""
This is a general matrix which needs partial pivoting to be
solved.
References
----------
http://web.mit.edu/10.001/Web/Course_Notes/GaussElimPivoting.html
"""
n = 4
mat = [[0.02, 0.01, 0., 0.], [1., 2., 1., 0.], [0., 1., 2., 1.],
[0., 0., 100., 200.]]
b = [0.02, 1., 4., 800.]
sing, result = gj_solve_helper(np.ravel(mat), b, n)
mat = np.array(mat)
new_b = np.dot(mat, np.transpose(np.array(result)))
new_b = np.ravel(np.array(new_b))
assert np.allclose(new_b, np.array(b))
self.assertAlmostEqual(sing, 0.0)
def test_band_matrix(self):
n = 3
mat = [[1., -2., 0.], [1., -1., 3.], [2., 5., 0.]]
b = [-3., 1., 0.5]
sing, result = gj_solve_helper(np.ravel(mat), b, n)
mat = np.array(mat)
new_b = np.dot(mat, np.transpose(np.array(result)))
new_b = np.ravel(np.array(new_b))
assert np.allclose(new_b, np.array(b))
self.assertAlmostEqual(sing, 0.0)
def test_dense_matrix(self):
n = 3
mat = [[0.96, 4.6, -3.7], [2.7, 4.3, -0.67], [0.9, 0., -5.]]
b = [2.4, 3.6, -5.8]
sing, result = gj_solve_helper(np.ravel(mat), b, n)
mat = np.array(mat)
new_b = np.dot(mat, np.transpose(np.array(result)))
new_b = np.ravel(np.array(new_b))
assert np.allclose(new_b, np.array(b))
self.assertAlmostEqual(sing, 0.0)
def test_tridiagonal_matrix(self):
n = 4
mat = [[-2., 1., 0., 0.], [1., -2., 1., 0.], [0., 1., -2., 0.],
[0., 0., 1., -2.]]
b = [-1., 0., 0., -5.]
sing, result = gj_solve_helper(np.ravel(mat), b, n)
mat = np.array(mat)
new_b = np.dot(mat, np.transpose(np.array(result)))
new_b = np.ravel(np.array(new_b))
assert np.allclose(new_b, np.array(b))
self.assertAlmostEqual(sing, 0.0)
def test_symmetric_matrix(self):
n = 3
mat = [[0.96, 4.6, -3.7], [4.6, 4.3, -0.67], [-3.7, -0.67, -5.]]
b = [2.4, 3.6, -5.8]
sing, result = gj_solve_helper(np.ravel(mat), b, n)
mat = np.array(mat)
new_b = np.dot(mat, np.transpose(np.array(result)))
new_b = np.ravel(np.array(new_b))
assert np.allclose(new_b, np.array(b))
self.assertAlmostEqual(sing, 0.0)
def test_symmetric_positivedefinite_Matrix(self):
n = 4
mat = [[1., 1., 4., -1.], [1., 5., 0., -1.], [4., 0., 21., -4.],
[-1., -1., -4., 10.]]
b = [2.4, 3.6, -5.8, 0.5]
sing, result = gj_solve_helper(np.ravel(mat), b, n)
mat = np.array(mat)
new_b = np.dot(mat, np.transpose(np.array(result)))
new_b = np.ravel(np.array(new_b))
assert np.allclose(new_b, np.array(b))
self.assertAlmostEqual(sing, 0.0)
def test_inverse(self):
# Given
n = 3
mat = [[1.0, 2.0, 2.5], [2.5, 1.0, 0.0], [0.0, 0.0, 1.0]]
b = np.identity(3).ravel().tolist()
A = np.zeros((3, 6)).ravel().tolist()
augmented_matrix(np.ravel(mat), b, 3, 3, 3, A)
result = np.zeros((3, 3)).ravel().tolist()
# When
sing = gj_solve(A, n, n, result)
# Then
mat = np.asarray(mat)
res = np.asarray(result)
res.shape = 3, 3
np.testing.assert_allclose(res, np.linalg.inv(mat))
self.assertAlmostEqual(sing, 0.0)
def test_matmult(self):
# Given
n = 3
a = np.random.random((3, 3))
b = np.random.random((3, 3))
result = [0.0]*9
# When
mat_mult(a.ravel(), b.ravel(), n, result)
# Then.
expect = np.dot(a, b)
result = np.asarray(result)
result.shape = 3, 3
np.testing.assert_allclose(result, expect)
def test_mat_vec_mult(self):
# Given
n = 3
a = np.random.random((3, 3))
b = np.random.random((3,))
result = [0.0]*3
# When
mat_vec_mult(a.ravel(), b, n, result)
# Then.
expect = np.dot(a, b)
result = np.asarray(result)
np.testing.assert_allclose(result, expect)
def test_singular_matrix(self):
# Given
n = 3
mat = [[1., 1., 0.], [1., 1., 0.], [1., 1., 1.]]
b = [1.0, 1.0, 1.0]
        # When
sing, result = gj_solve_helper(np.ravel(mat), b, n)
self.assertAlmostEqual(sing, 1.0)
if __name__ == '__main__':
unittest.main()
| 32.237864 | 73 | 0.512573 |
from pysph.sph.wc.linalg import (
augmented_matrix, gj_solve, mat_mult, mat_vec_mult
)
import numpy as np
import unittest
def gj_solve_helper(a, b, n):
m = np.zeros((n, n+1)).ravel().tolist()
augmented_matrix(a, b, n, 1, n, m)
result = [0.0]*n
is_singular = gj_solve(m, n, 1, result)
return is_singular, result
class TestLinalg(unittest.TestCase):
def _to_array(self, x, shape=None):
x = np.asarray(x)
if shape:
x.shape = shape
return x
def test_augmented_matrix(self):
a = np.random.random((3, 3))
b = np.random.random((3, 2))
res = np.zeros((3, 5)).ravel().tolist()
expect = np.zeros((3, 5))
expect[:, :3] = a
expect[:, 3:] = b
augmented_matrix(a.ravel(), b.ravel(), 3, 2, 3, res)
res = self._to_array(res, (3, 5))
np.testing.assert_array_almost_equal(res, expect)
def test_augmented_matrix_with_lower_dimension(self):
a = np.random.random((3, 3))
b = np.random.random((3, 2))
res = np.zeros((3, 5)).ravel().tolist()
expect = np.zeros((2, 4))
expect[:, :2] = a[:2, :2]
expect[:, 2:] = b[:2, :]
expect.resize((3, 5), refcheck=False)
augmented_matrix(a.ravel(), b.ravel(), 2, 2, 3, res)
res = self._to_array(res, (3, 5))
np.testing.assert_array_almost_equal(res, expect)
def test_augmented_matrix_with_gjsolve_with_lower_dimension(self):
nmax = 3
mat = np.array([[7., 4., 2.], [8., 9., 4.], [1., 4., 10.]])
b = np.array([5., 4., 2.])
expect = np.linalg.solve(mat[:2, :2], b[:2])
augmat = np.zeros((3, 4)).ravel().tolist()
res = np.zeros(2).ravel().tolist()
augmented_matrix(mat.ravel(), b.ravel(), 2, 1, nmax, augmat)
gj_solve(augmat, 2, 1, res)
np.testing.assert_array_almost_equal(res, expect)
def test_general_matrix(self):
n = 4
mat = [[0.02, 0.01, 0., 0.], [1., 2., 1., 0.], [0., 1., 2., 1.],
[0., 0., 100., 200.]]
b = [0.02, 1., 4., 800.]
sing, result = gj_solve_helper(np.ravel(mat), b, n)
mat = np.array(mat)
new_b = np.dot(mat, np.transpose(np.array(result)))
new_b = np.ravel(np.array(new_b))
assert np.allclose(new_b, np.array(b))
self.assertAlmostEqual(sing, 0.0)
def test_band_matrix(self):
n = 3
mat = [[1., -2., 0.], [1., -1., 3.], [2., 5., 0.]]
b = [-3., 1., 0.5]
sing, result = gj_solve_helper(np.ravel(mat), b, n)
mat = np.array(mat)
new_b = np.dot(mat, np.transpose(np.array(result)))
new_b = np.ravel(np.array(new_b))
assert np.allclose(new_b, np.array(b))
self.assertAlmostEqual(sing, 0.0)
def test_dense_matrix(self):
n = 3
mat = [[0.96, 4.6, -3.7], [2.7, 4.3, -0.67], [0.9, 0., -5.]]
b = [2.4, 3.6, -5.8]
sing, result = gj_solve_helper(np.ravel(mat), b, n)
mat = np.array(mat)
new_b = np.dot(mat, np.transpose(np.array(result)))
new_b = np.ravel(np.array(new_b))
assert np.allclose(new_b, np.array(b))
self.assertAlmostEqual(sing, 0.0)
def test_tridiagonal_matrix(self):
n = 4
mat = [[-2., 1., 0., 0.], [1., -2., 1., 0.], [0., 1., -2., 0.],
[0., 0., 1., -2.]]
b = [-1., 0., 0., -5.]
sing, result = gj_solve_helper(np.ravel(mat), b, n)
mat = np.array(mat)
new_b = np.dot(mat, np.transpose(np.array(result)))
new_b = np.ravel(np.array(new_b))
assert np.allclose(new_b, np.array(b))
self.assertAlmostEqual(sing, 0.0)
def test_symmetric_matrix(self):
n = 3
mat = [[0.96, 4.6, -3.7], [4.6, 4.3, -0.67], [-3.7, -0.67, -5.]]
b = [2.4, 3.6, -5.8]
sing, result = gj_solve_helper(np.ravel(mat), b, n)
mat = np.array(mat)
new_b = np.dot(mat, np.transpose(np.array(result)))
new_b = np.ravel(np.array(new_b))
assert np.allclose(new_b, np.array(b))
self.assertAlmostEqual(sing, 0.0)
def test_symmetric_positivedefinite_Matrix(self):
n = 4
mat = [[1., 1., 4., -1.], [1., 5., 0., -1.], [4., 0., 21., -4.],
[-1., -1., -4., 10.]]
b = [2.4, 3.6, -5.8, 0.5]
sing, result = gj_solve_helper(np.ravel(mat), b, n)
mat = np.array(mat)
new_b = np.dot(mat, np.transpose(np.array(result)))
new_b = np.ravel(np.array(new_b))
assert np.allclose(new_b, np.array(b))
self.assertAlmostEqual(sing, 0.0)
def test_inverse(self):
n = 3
mat = [[1.0, 2.0, 2.5], [2.5, 1.0, 0.0], [0.0, 0.0, 1.0]]
b = np.identity(3).ravel().tolist()
A = np.zeros((3, 6)).ravel().tolist()
augmented_matrix(np.ravel(mat), b, 3, 3, 3, A)
result = np.zeros((3, 3)).ravel().tolist()
sing = gj_solve(A, n, n, result)
mat = np.asarray(mat)
res = np.asarray(result)
res.shape = 3, 3
np.testing.assert_allclose(res, np.linalg.inv(mat))
self.assertAlmostEqual(sing, 0.0)
def test_matmult(self):
n = 3
a = np.random.random((3, 3))
b = np.random.random((3, 3))
result = [0.0]*9
mat_mult(a.ravel(), b.ravel(), n, result)
expect = np.dot(a, b)
result = np.asarray(result)
result.shape = 3, 3
np.testing.assert_allclose(result, expect)
def test_mat_vec_mult(self):
n = 3
a = np.random.random((3, 3))
b = np.random.random((3,))
result = [0.0]*3
mat_vec_mult(a.ravel(), b, n, result)
expect = np.dot(a, b)
result = np.asarray(result)
np.testing.assert_allclose(result, expect)
def test_singular_matrix(self):
n = 3
mat = [[1., 1., 0.], [1., 1., 0.], [1., 1., 1.]]
b = [1.0, 1.0, 1.0]
sing, result = gj_solve_helper(np.ravel(mat), b, n)
self.assertAlmostEqual(sing, 1.0)
if __name__ == '__main__':
unittest.main()
| true | true |
1c4a4fe749a6730b16f54520019eb9f262e581a6 | 13,887 | py | Python | cvxpy/reductions/solvers/conic_solvers/conic_solver.py | QiuWJX/cvxpy | fd1c225b0cdf541618e292cae1a4c7ea25ddc934 | ["ECL-2.0", "Apache-2.0"] | 556 | 2021-04-20T03:19:49.000Z | 2022-03-30T12:31:38.000Z | cvxpy/reductions/solvers/conic_solvers/conic_solver.py | QiuWJX/cvxpy | fd1c225b0cdf541618e292cae1a4c7ea25ddc934 | ["ECL-2.0", "Apache-2.0"] | 358 | 2021-04-20T08:17:49.000Z | 2022-03-31T21:16:28.000Z | cvxpy/reductions/solvers/conic_solvers/conic_solver.py | phschiele/cvxpy | a43aed7447b87f6d0fbc6f71ae5c7b84183f3369 | ["ECL-2.0", "Apache-2.0"] | 131 | 2021-04-21T09:00:12.000Z | 2022-03-29T04:43:51.000Z |
"""
Copyright 2017 Robin Verschueren, 2017 Akshay Agrawal
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Tuple
import numpy as np
import scipy.sparse as sp
import cvxpy.settings as s
from cvxpy.constraints import PSD, SOC, ExpCone, NonNeg, PowCone3D, Zero
from cvxpy.reductions.cvx_attr2constr import convex_attributes
from cvxpy.reductions.dcp2cone.cone_matrix_stuffing import ParamConeProg
from cvxpy.reductions.solution import Solution, failure_solution
from cvxpy.reductions.solvers import utilities
from cvxpy.reductions.solvers.solver import Solver
# NOTE(akshayka): Small changes to this file can lead to drastic
# performance regressions. If you are making a change to this file,
# make sure to run cvxpy/tests/test_benchmarks.py to ensure that you have
# not introduced a regression.
class LinearOperator:
"""A wrapper for linear operators."""
def __init__(self, linear_op, shape: Tuple[int, ...]) -> None:
if sp.issparse(linear_op):
self._matmul = lambda X: linear_op @ X
else:
self._matmul = linear_op
self.shape = shape
def __call__(self, X):
return self._matmul(X)
def as_linear_operator(linear_op):
if isinstance(linear_op, LinearOperator):
return linear_op
elif sp.issparse(linear_op):
return LinearOperator(linear_op, linear_op.shape)
def as_block_diag_linear_operator(matrices) -> LinearOperator:
"""Block diag of SciPy sparse matrices or linear operators."""
linear_operators = [as_linear_operator(op) for op in matrices]
nrows = [op.shape[0] for op in linear_operators]
ncols = [op.shape[1] for op in linear_operators]
m, n = sum(nrows), sum(ncols)
col_indices = np.append(0, np.cumsum(ncols))
def matmul(X):
outputs = []
for i, op in enumerate(linear_operators):
Xi = X[col_indices[i]:col_indices[i + 1]]
outputs.append(op(Xi))
return sp.vstack(outputs)
return LinearOperator(matmul, (m, n))
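# Sketch of the behaviour (not an exercised code path here): for two sparse
# blocks A (2x3) and B (4x5), as_block_diag_linear_operator([A, B]) acts like
# sp.block_diag([A, B]) applied to an 8-row operand -- a (6, 8) operator --
# without ever materialising the block-diagonal matrix.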
# Utility method for formatting a ConeDims instance into a dictionary
# that can be supplied to solvers.
def dims_to_solver_dict(cone_dims):
cones = {
'f': cone_dims.zero,
'l': cone_dims.nonneg,
'q': cone_dims.soc,
'ep': cone_dims.exp,
's': cone_dims.psd,
'p': cone_dims.p3d
}
return cones
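# For example (a sketch; the exact container types come from ConeDims), a cone
# program with two equalities, three nonnegativity constraints and one
# 3-dimensional second-order cone would map to
# {'f': 2, 'l': 3, 'q': [3], 'ep': 0, 's': [], 'p': []}.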
class ConicSolver(Solver):
"""Conic solver class with reduction semantics
"""
# The key that maps to ConeDims in the data returned by apply().
DIMS = "dims"
# Every conic solver must support Zero and NonNeg constraints.
SUPPORTED_CONSTRAINTS = [Zero, NonNeg]
# Some solvers cannot solve problems that do not have constraints.
# For such solvers, REQUIRES_CONSTR should be set to True.
REQUIRES_CONSTR = False
# If a solver supports exponential cones, it must specify the corresponding order
# The cvxpy standard for the exponential cone is:
# K_e = closure{(x,y,z) | z >= y * exp(x/y), y>0}.
# Whenever a solver uses this convention, EXP_CONE_ORDER should be [0, 1, 2].
EXP_CONE_ORDER = None
def accepts(self, problem):
return (isinstance(problem, ParamConeProg)
and (self.MIP_CAPABLE or not problem.is_mixed_integer())
and not convex_attributes([problem.x])
and (len(problem.constraints) > 0 or not self.REQUIRES_CONSTR)
and all(type(c) in self.SUPPORTED_CONSTRAINTS for c in
problem.constraints))
@staticmethod
def get_spacing_matrix(shape: Tuple[int, ...], spacing, streak, num_blocks, offset):
"""Returns a sparse matrix that spaces out an expression.
Parameters
----------
shape : tuple
(rows in matrix, columns in matrix)
spacing : int
The number of rows between the start of each non-zero block.
streak: int
The number of elements in each block.
num_blocks : int
The number of non-zero blocks.
offset : int
The number of zero rows at the beginning of the matrix.
Returns
-------
SciPy CSC matrix
A sparse matrix
"""
num_values = num_blocks * streak
val_arr = np.ones(num_values, dtype=np.float64)
streak_plus_spacing = streak + spacing
row_arr = np.arange(0, num_blocks * streak_plus_spacing).reshape(
num_blocks, streak_plus_spacing)[:, :streak].flatten() + offset
col_arr = np.arange(num_values)
return sp.csc_matrix((val_arr, (row_arr, col_arr)), shape)
@staticmethod
def psd_format_mat(constr):
"""Return a matrix to multiply by PSD constraint coefficients.
"""
# Default is identity.
return sp.eye(constr.size, format='csc')
def format_constraints(self, problem, exp_cone_order):
"""
Returns a ParamConeProg whose problem data tensors will yield the
coefficient "A" and offset "b" for the constraint in the following
formats:
Linear equations: (A, b) such that A * x + b == 0,
Linear inequalities: (A, b) such that A * x + b >= 0,
Second order cone: (A, b) such that A * x + b in SOC,
Exponential cone: (A, b) such that A * x + b in EXP,
Semidefinite cone: (A, b) such that A * x + b in PSD,
The CVXPY standard for the exponential cone is:
K_e = closure{(x,y,z) | z >= y * exp(x/y), y>0}.
Whenever a solver uses this convention, EXP_CONE_ORDER should be
[0, 1, 2].
The CVXPY standard for the second order cone is:
SOC(n) = { x : x[0] >= norm(x[1:n], 2) }.
All currently supported solvers use this convention.
Args:
problem : ParamConeProg
The problem that is the provenance of the constraint.
exp_cone_order: list
A list indicating how the exponential cone arguments are ordered.
Returns:
ParamConeProg with structured A.
"""
# Create a matrix to reshape constraints, then replicate for each
# variable entry.
restruct_mat = [] # Form a block diagonal matrix.
for constr in problem.constraints:
total_height = sum([arg.size for arg in constr.args])
if type(constr) == Zero:
restruct_mat.append(-sp.eye(constr.size, format='csr'))
elif type(constr) == NonNeg:
restruct_mat.append(sp.eye(constr.size, format='csr'))
elif type(constr) == SOC:
# Group each t row with appropriate X rows.
assert constr.axis == 0, 'SOC must be lowered to axis == 0'
# Interleave the rows of coeffs[0] and coeffs[1]:
# coeffs[0][0, :]
# coeffs[1][0:gap-1, :]
# coeffs[0][1, :]
# coeffs[1][gap-1:2*(gap-1), :]
t_spacer = ConicSolver.get_spacing_matrix(
shape=(total_height, constr.args[0].size),
spacing=constr.args[1].shape[0],
streak=1,
num_blocks=constr.args[0].size,
offset=0,
)
X_spacer = ConicSolver.get_spacing_matrix(
shape=(total_height, constr.args[1].size),
spacing=1,
streak=constr.args[1].shape[0],
num_blocks=constr.args[0].size,
offset=1,
)
restruct_mat.append(sp.hstack([t_spacer, X_spacer]))
elif type(constr) == ExpCone:
arg_mats = []
for i, arg in enumerate(constr.args):
space_mat = ConicSolver.get_spacing_matrix(
shape=(total_height, arg.size),
spacing=len(exp_cone_order) - 1,
streak=1,
num_blocks=arg.size,
offset=exp_cone_order[i],
)
arg_mats.append(space_mat)
restruct_mat.append(sp.hstack(arg_mats))
elif type(constr) == PowCone3D:
arg_mats = []
for i, arg in enumerate(constr.args):
space_mat = ConicSolver.get_spacing_matrix(
shape=(total_height, arg.size), spacing=2,
streak=1, num_blocks=arg.size, offset=i,
)
arg_mats.append(space_mat)
restruct_mat.append(sp.hstack(arg_mats))
elif type(constr) == PSD:
restruct_mat.append(self.psd_format_mat(constr))
else:
raise ValueError("Unsupported constraint type.")
# Form new ParamConeProg
if restruct_mat:
# TODO(akshayka): profile to see whether using linear operators
# or bmat is faster
restruct_mat = as_block_diag_linear_operator(restruct_mat)
# this is equivalent to but _much_ faster than:
# restruct_mat_rep = sp.block_diag([restruct_mat]*(problem.x.size + 1))
# restruct_A = restruct_mat_rep * problem.A
unspecified, remainder = divmod(problem.A.shape[0] *
problem.A.shape[1],
restruct_mat.shape[1])
reshaped_A = problem.A.reshape(restruct_mat.shape[1],
unspecified, order='F').tocsr()
restructured_A = restruct_mat(reshaped_A).tocoo()
# Because of a bug in scipy versions < 1.20, `reshape`
# can overflow if indices are int32s.
restructured_A.row = restructured_A.row.astype(np.int64)
restructured_A.col = restructured_A.col.astype(np.int64)
restructured_A = restructured_A.reshape(
np.int64(restruct_mat.shape[0]) * (np.int64(problem.x.size) + 1),
problem.A.shape[1], order='F')
else:
restructured_A = problem.A
new_param_cone_prog = ParamConeProg(problem.c,
problem.x,
restructured_A,
problem.variables,
problem.var_id_to_col,
problem.constraints,
problem.parameters,
problem.param_id_to_col,
formatted=True)
return new_param_cone_prog
def invert(self, solution, inverse_data):
"""Returns the solution to the original problem given the inverse_data.
"""
status = solution['status']
if status in s.SOLUTION_PRESENT:
opt_val = solution['value']
primal_vars = {inverse_data[self.VAR_ID]: solution['primal']}
eq_dual = utilities.get_dual_values(
solution['eq_dual'],
utilities.extract_dual_value,
inverse_data[Solver.EQ_CONSTR])
leq_dual = utilities.get_dual_values(
solution['ineq_dual'],
utilities.extract_dual_value,
inverse_data[Solver.NEQ_CONSTR])
eq_dual.update(leq_dual)
dual_vars = eq_dual
return Solution(status, opt_val, primal_vars, dual_vars, {})
else:
return failure_solution(status)
def _prepare_data_and_inv_data(self, problem):
data = {}
inv_data = {self.VAR_ID: problem.x.id}
# Format constraints
#
# By default cvxpy follows the SCS convention, which requires
# constraints to be specified in the following order:
# 1. zero cone
# 2. non-negative orthant
# 3. soc
# 4. psd
# 5. exponential
# 6. three-dimensional power cones
if not problem.formatted:
problem = self.format_constraints(problem, self.EXP_CONE_ORDER)
data[s.PARAM_PROB] = problem
data[self.DIMS] = problem.cone_dims
inv_data[self.DIMS] = problem.cone_dims
constr_map = problem.constr_map
inv_data[self.EQ_CONSTR] = constr_map[Zero]
inv_data[self.NEQ_CONSTR] = constr_map[NonNeg] + constr_map[SOC] + \
constr_map[PSD] + constr_map[ExpCone] + constr_map[PowCone3D]
return problem, data, inv_data
def apply(self, problem):
"""Returns a new problem and data for inverting the new solution.
Returns
-------
tuple
(dict of arguments needed for the solver, inverse data)
"""
# This is a reference implementation following SCS conventions
# Implementations for other solvers may amend or override the implementation entirely
problem, data, inv_data = self._prepare_data_and_inv_data(problem)
# Apply parameter values.
# Obtain A, b such that Ax + s = b, s \in cones.
c, d, A, b = problem.apply_parameters()
data[s.C] = c
inv_data[s.OFFSET] = d
data[s.A] = -A
data[s.B] = b
return data, inv_data
| 40.605263 | 93 | 0.585728 |
from typing import Tuple
import numpy as np
import scipy.sparse as sp
import cvxpy.settings as s
from cvxpy.constraints import PSD, SOC, ExpCone, NonNeg, PowCone3D, Zero
from cvxpy.reductions.cvx_attr2constr import convex_attributes
from cvxpy.reductions.dcp2cone.cone_matrix_stuffing import ParamConeProg
from cvxpy.reductions.solution import Solution, failure_solution
from cvxpy.reductions.solvers import utilities
from cvxpy.reductions.solvers.solver import Solver
class LinearOperator:
def __init__(self, linear_op, shape: Tuple[int, ...]) -> None:
if sp.issparse(linear_op):
self._matmul = lambda X: linear_op @ X
else:
self._matmul = linear_op
self.shape = shape
def __call__(self, X):
return self._matmul(X)
def as_linear_operator(linear_op):
if isinstance(linear_op, LinearOperator):
return linear_op
elif sp.issparse(linear_op):
return LinearOperator(linear_op, linear_op.shape)
def as_block_diag_linear_operator(matrices) -> LinearOperator:
linear_operators = [as_linear_operator(op) for op in matrices]
nrows = [op.shape[0] for op in linear_operators]
ncols = [op.shape[1] for op in linear_operators]
m, n = sum(nrows), sum(ncols)
col_indices = np.append(0, np.cumsum(ncols))
def matmul(X):
outputs = []
for i, op in enumerate(linear_operators):
Xi = X[col_indices[i]:col_indices[i + 1]]
outputs.append(op(Xi))
return sp.vstack(outputs)
return LinearOperator(matmul, (m, n))
def dims_to_solver_dict(cone_dims):
cones = {
'f': cone_dims.zero,
'l': cone_dims.nonneg,
'q': cone_dims.soc,
'ep': cone_dims.exp,
's': cone_dims.psd,
'p': cone_dims.p3d
}
return cones
class ConicSolver(Solver):
DIMS = "dims"
SUPPORTED_CONSTRAINTS = [Zero, NonNeg]
REQUIRES_CONSTR = False
EXP_CONE_ORDER = None
def accepts(self, problem):
return (isinstance(problem, ParamConeProg)
and (self.MIP_CAPABLE or not problem.is_mixed_integer())
and not convex_attributes([problem.x])
and (len(problem.constraints) > 0 or not self.REQUIRES_CONSTR)
and all(type(c) in self.SUPPORTED_CONSTRAINTS for c in
problem.constraints))
@staticmethod
def get_spacing_matrix(shape: Tuple[int, ...], spacing, streak, num_blocks, offset):
num_values = num_blocks * streak
val_arr = np.ones(num_values, dtype=np.float64)
streak_plus_spacing = streak + spacing
row_arr = np.arange(0, num_blocks * streak_plus_spacing).reshape(
num_blocks, streak_plus_spacing)[:, :streak].flatten() + offset
col_arr = np.arange(num_values)
return sp.csc_matrix((val_arr, (row_arr, col_arr)), shape)
@staticmethod
def psd_format_mat(constr):
return sp.eye(constr.size, format='csc')
def format_constraints(self, problem, exp_cone_order):
        restruct_mat = []
        for constr in problem.constraints:
total_height = sum([arg.size for arg in constr.args])
if type(constr) == Zero:
restruct_mat.append(-sp.eye(constr.size, format='csr'))
elif type(constr) == NonNeg:
restruct_mat.append(sp.eye(constr.size, format='csr'))
elif type(constr) == SOC:
assert constr.axis == 0, 'SOC must be lowered to axis == 0'
t_spacer = ConicSolver.get_spacing_matrix(
shape=(total_height, constr.args[0].size),
spacing=constr.args[1].shape[0],
streak=1,
num_blocks=constr.args[0].size,
offset=0,
)
X_spacer = ConicSolver.get_spacing_matrix(
shape=(total_height, constr.args[1].size),
spacing=1,
streak=constr.args[1].shape[0],
num_blocks=constr.args[0].size,
offset=1,
)
restruct_mat.append(sp.hstack([t_spacer, X_spacer]))
elif type(constr) == ExpCone:
arg_mats = []
for i, arg in enumerate(constr.args):
space_mat = ConicSolver.get_spacing_matrix(
shape=(total_height, arg.size),
spacing=len(exp_cone_order) - 1,
streak=1,
num_blocks=arg.size,
offset=exp_cone_order[i],
)
arg_mats.append(space_mat)
restruct_mat.append(sp.hstack(arg_mats))
elif type(constr) == PowCone3D:
arg_mats = []
for i, arg in enumerate(constr.args):
space_mat = ConicSolver.get_spacing_matrix(
shape=(total_height, arg.size), spacing=2,
streak=1, num_blocks=arg.size, offset=i,
)
arg_mats.append(space_mat)
restruct_mat.append(sp.hstack(arg_mats))
elif type(constr) == PSD:
restruct_mat.append(self.psd_format_mat(constr))
else:
raise ValueError("Unsupported constraint type.")
if restruct_mat:
restruct_mat = as_block_diag_linear_operator(restruct_mat)
unspecified, remainder = divmod(problem.A.shape[0] *
problem.A.shape[1],
restruct_mat.shape[1])
reshaped_A = problem.A.reshape(restruct_mat.shape[1],
unspecified, order='F').tocsr()
restructured_A = restruct_mat(reshaped_A).tocoo()
restructured_A.row = restructured_A.row.astype(np.int64)
restructured_A.col = restructured_A.col.astype(np.int64)
restructured_A = restructured_A.reshape(
np.int64(restruct_mat.shape[0]) * (np.int64(problem.x.size) + 1),
problem.A.shape[1], order='F')
else:
restructured_A = problem.A
new_param_cone_prog = ParamConeProg(problem.c,
problem.x,
restructured_A,
problem.variables,
problem.var_id_to_col,
problem.constraints,
problem.parameters,
problem.param_id_to_col,
formatted=True)
return new_param_cone_prog
def invert(self, solution, inverse_data):
status = solution['status']
if status in s.SOLUTION_PRESENT:
opt_val = solution['value']
primal_vars = {inverse_data[self.VAR_ID]: solution['primal']}
eq_dual = utilities.get_dual_values(
solution['eq_dual'],
utilities.extract_dual_value,
inverse_data[Solver.EQ_CONSTR])
leq_dual = utilities.get_dual_values(
solution['ineq_dual'],
utilities.extract_dual_value,
inverse_data[Solver.NEQ_CONSTR])
eq_dual.update(leq_dual)
dual_vars = eq_dual
return Solution(status, opt_val, primal_vars, dual_vars, {})
else:
return failure_solution(status)
def _prepare_data_and_inv_data(self, problem):
data = {}
inv_data = {self.VAR_ID: problem.x.id}
if not problem.formatted:
problem = self.format_constraints(problem, self.EXP_CONE_ORDER)
data[s.PARAM_PROB] = problem
data[self.DIMS] = problem.cone_dims
inv_data[self.DIMS] = problem.cone_dims
constr_map = problem.constr_map
inv_data[self.EQ_CONSTR] = constr_map[Zero]
inv_data[self.NEQ_CONSTR] = constr_map[NonNeg] + constr_map[SOC] + \
constr_map[PSD] + constr_map[ExpCone] + constr_map[PowCone3D]
return problem, data, inv_data
def apply(self, problem):
problem, data, inv_data = self._prepare_data_and_inv_data(problem)
c, d, A, b = problem.apply_parameters()
data[s.C] = c
inv_data[s.OFFSET] = d
data[s.A] = -A
data[s.B] = b
return data, inv_data
| true | true |
1c4a4ffc7df2bbba0362421473040901614f36f3 | 4,959 | py | Python | projects/RAMADDA_publish/sphinx/source/conf.py | Unidata/drilsdown | 55aca7168fb390f31c36729605401564e9b82c56 | ["MIT"] | 3 | 2018-05-25T00:19:12.000Z | 2021-01-08T15:54:36.000Z | projects/RAMADDA_publish/sphinx/source/conf.py | suvarchal/drilsdown | e82f58396f640fef847353caf1bd4b2bf016c7a6 | ["MIT"] | 11 | 2017-10-31T20:15:24.000Z | 2019-12-16T21:01:55.000Z | projects/RAMADDA_publish/sphinx/source/conf.py | suvarchal/drilsdown | e82f58396f640fef847353caf1bd4b2bf016c7a6 | ["MIT"] | 10 | 2018-02-08T22:23:28.000Z | 2019-09-29T23:25:19.000Z |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'RAMADDA Publish'
copyright = ''
author = 'Suvarchal'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.3'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'RAMADDAPublishdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'RAMADDAPublish.tex', 'RAMADDA Publish Documentation',
'Suvarchal', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ramaddapublish', 'RAMADDA Publish Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'RAMADDAPublish', 'RAMADDA Publish Documentation',
author, 'RAMADDAPublish', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
| 30.423313 | 79 | 0.654971 |
project = 'RAMADDA Publish'
copyright = ''
author = 'Suvarchal'
version = ''
release = '1.3'
extensions = [
'sphinx.ext.githubpages',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = []
pygments_style = 'sphinx'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'RAMADDAPublishdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'RAMADDAPublish.tex', 'RAMADDA Publish Documentation',
'Suvarchal', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ramaddapublish', 'RAMADDA Publish Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'RAMADDAPublish', 'RAMADDA Publish Documentation',
author, 'RAMADDAPublish', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
| true | true |
1c4a51c816367f7be461f80db74b01f5bb2fc407 | 124 | py | Python | model/group.py | Den21rus/barancev_training | 892cd38ffde0954278ea2cebe72379b9db55a29c | ["Apache-2.0"] | null | null | null | model/group.py | Den21rus/barancev_training | 892cd38ffde0954278ea2cebe72379b9db55a29c | ["Apache-2.0"] | null | null | null | model/group.py | Den21rus/barancev_training | 892cd38ffde0954278ea2cebe72379b9db55a29c | ["Apache-2.0"] | null | null | null |
class Group:
def __init__(self, username, password):
self.username = username
self.password = password | 20.666667 | 43 | 0.653226 |
class Group:
def __init__(self, username, password):
self.username = username
self.password = password | true | true |
1c4a522a10fa8856197c75ae296fd1e45edb4dc0 | 5,150 | py | Python | official/vision/detection/executor/detection_executor.py | Silas-Asamoah/models | 833e6939acb42f695b0ae3765f98fe494f06115c | ["Apache-2.0"] | 2 | 2019-11-30T03:43:50.000Z | 2019-11-30T03:43:55.000Z | official/vision/detection/executor/detection_executor.py | utpal0401/models | 426b2c6e894c22ffb17f32581305ea87c3b8b377 | ["Apache-2.0"] | 1 | 2021-03-31T21:30:38.000Z | 2021-03-31T21:30:38.000Z | official/vision/detection/executor/detection_executor.py | utpal0401/models | 426b2c6e894c22ffb17f32581305ea87c3b8b377 | ["Apache-2.0"] | 2 | 2019-11-10T07:48:51.000Z | 2020-02-04T04:17:41.000Z |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An executor class for running model on TensorFlow 2.0."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
from absl import logging
import os
import json
import tensorflow.compat.v2 as tf
from official.modeling.training import distributed_executor as executor
class DetectionDistributedExecutor(executor.DistributedExecutor):
"""Detection specific customer training loop executor.
Subclasses the DistributedExecutor and adds support for numpy based metrics.
"""
def __init__(self,
predict_post_process_fn=None,
trainable_variables_filter=None,
**kwargs):
super(DetectionDistributedExecutor, self).__init__(**kwargs)
params = kwargs['params']
if predict_post_process_fn:
assert callable(predict_post_process_fn)
if trainable_variables_filter:
assert callable(trainable_variables_filter)
self._predict_post_process_fn = predict_post_process_fn
self._trainable_variables_filter = trainable_variables_filter
def _create_replicated_step(self,
strategy,
model,
loss_fn,
optimizer,
metric=None):
trainable_variables = model.trainable_variables
if self._trainable_variables_filter:
trainable_variables = self._trainable_variables_filter(
trainable_variables)
logging.info('Filter trainable variables from %d to %d',
len(model.trainable_variables), len(trainable_variables))
def _replicated_step(inputs):
"""Replicated training step."""
inputs, labels = inputs
with tf.GradientTape() as tape:
outputs = model(inputs, training=True)
all_losses = loss_fn(labels, outputs)
losses = {}
for k, v in all_losses.items():
v = tf.reduce_mean(v) / strategy.num_replicas_in_sync
losses[k] = v
loss = losses['total_loss']
if isinstance(metric, tf.keras.metrics.Metric):
metric.update_state(labels, outputs)
else:
logging.error('train metric is not an instance of '
'tf.keras.metrics.Metric.')
grads = tape.gradient(loss, trainable_variables)
optimizer.apply_gradients(zip(grads, trainable_variables))
return loss
return _replicated_step
def _create_test_step(self, strategy, model, metric):
"""Creates a distributed test step."""
@tf.function
def test_step(iterator):
"""Calculates evaluation metrics on distributed devices."""
def _test_step_fn(inputs):
"""Replicated accuracy calculation."""
inputs, labels = inputs
model_outputs = model(inputs, training=False)
if self._predict_post_process_fn:
labels, prediction_outputs = self._predict_post_process_fn(
labels, model_outputs)
return labels, prediction_outputs
labels, outputs = strategy.experimental_run_v2(
_test_step_fn, args=(next(iterator),))
outputs = tf.nest.map_structure(strategy.experimental_local_results,
outputs)
labels = tf.nest.map_structure(strategy.experimental_local_results,
labels)
return labels, outputs
return test_step
def _run_evaluation(self, test_step, current_training_step, metric,
test_iterator):
"""Runs validation steps and aggregate metrics."""
if not test_iterator or not metric:
logging.warning(
'Both test_iterator (%s) and metrics (%s) must not be None.',
test_iterator, metric)
return None
logging.info('Running evaluation after step: %s.', current_training_step)
while True:
try:
labels, outputs = test_step(test_iterator)
if metric:
metric.update_state(labels, outputs)
except (StopIteration, tf.errors.OutOfRangeError):
break
metric_result = metric.result()
if isinstance(metric, tf.keras.metrics.Metric):
metric_result = tf.nest.map_structure(lambda x: x.numpy().astype(float),
metric_result)
logging.info('Step: [%d] Validation metric = %s', current_training_step,
metric_result)
return metric_result
| 37.867647 | 80 | 0.66 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import os
import json
import tensorflow.compat.v2 as tf
from official.modeling.training import distributed_executor as executor
class DetectionDistributedExecutor(executor.DistributedExecutor):
def __init__(self,
predict_post_process_fn=None,
trainable_variables_filter=None,
**kwargs):
super(DetectionDistributedExecutor, self).__init__(**kwargs)
params = kwargs['params']
if predict_post_process_fn:
assert callable(predict_post_process_fn)
if trainable_variables_filter:
assert callable(trainable_variables_filter)
self._predict_post_process_fn = predict_post_process_fn
self._trainable_variables_filter = trainable_variables_filter
def _create_replicated_step(self,
strategy,
model,
loss_fn,
optimizer,
metric=None):
trainable_variables = model.trainable_variables
if self._trainable_variables_filter:
trainable_variables = self._trainable_variables_filter(
trainable_variables)
logging.info('Filter trainable variables from %d to %d',
len(model.trainable_variables), len(trainable_variables))
def _replicated_step(inputs):
inputs, labels = inputs
with tf.GradientTape() as tape:
outputs = model(inputs, training=True)
all_losses = loss_fn(labels, outputs)
losses = {}
for k, v in all_losses.items():
v = tf.reduce_mean(v) / strategy.num_replicas_in_sync
losses[k] = v
loss = losses['total_loss']
if isinstance(metric, tf.keras.metrics.Metric):
metric.update_state(labels, outputs)
else:
logging.error('train metric is not an instance of '
'tf.keras.metrics.Metric.')
grads = tape.gradient(loss, trainable_variables)
optimizer.apply_gradients(zip(grads, trainable_variables))
return loss
return _replicated_step
def _create_test_step(self, strategy, model, metric):
@tf.function
def test_step(iterator):
def _test_step_fn(inputs):
inputs, labels = inputs
model_outputs = model(inputs, training=False)
if self._predict_post_process_fn:
labels, prediction_outputs = self._predict_post_process_fn(
labels, model_outputs)
return labels, prediction_outputs
labels, outputs = strategy.experimental_run_v2(
_test_step_fn, args=(next(iterator),))
outputs = tf.nest.map_structure(strategy.experimental_local_results,
outputs)
labels = tf.nest.map_structure(strategy.experimental_local_results,
labels)
return labels, outputs
return test_step
def _run_evaluation(self, test_step, current_training_step, metric,
test_iterator):
if not test_iterator or not metric:
logging.warning(
'Both test_iterator (%s) and metrics (%s) must not be None.',
test_iterator, metric)
return None
logging.info('Running evaluation after step: %s.', current_training_step)
while True:
try:
labels, outputs = test_step(test_iterator)
if metric:
metric.update_state(labels, outputs)
except (StopIteration, tf.errors.OutOfRangeError):
break
metric_result = metric.result()
if isinstance(metric, tf.keras.metrics.Metric):
metric_result = tf.nest.map_structure(lambda x: x.numpy().astype(float),
metric_result)
logging.info('Step: [%d] Validation metric = %s', current_training_step,
metric_result)
return metric_result
| true | true |
1c4a52e6f133e6c9b67ce57eaeec57e4ff28a9dd | 2,392 | py | Python | tests/wallet/test_wallet_interested_store.py | Chinilla/chinilla-blockchain | 59bebcf94e65b74fbb53ad4929bbd79cb28be619 | ["Apache-2.0"] | null | null | null | tests/wallet/test_wallet_interested_store.py | Chinilla/chinilla-blockchain | 59bebcf94e65b74fbb53ad4929bbd79cb28be619 | ["Apache-2.0"] | null | null | null | tests/wallet/test_wallet_interested_store.py | Chinilla/chinilla-blockchain | 59bebcf94e65b74fbb53ad4929bbd79cb28be619 | ["Apache-2.0"] | null | null | null |
from pathlib import Path
from secrets import token_bytes
import aiosqlite
import pytest
from chinilla.types.blockchain_format.coin import Coin
from chinilla.util.db_wrapper import DBWrapper
from chinilla.util.ints import uint64
from chinilla.wallet.wallet_interested_store import WalletInterestedStore
class TestWalletInterestedStore:
@pytest.mark.asyncio
async def test_store(self):
db_filename = Path("wallet_interested_store_test.db")
if db_filename.exists():
db_filename.unlink()
db_connection = await aiosqlite.connect(db_filename)
db_wrapper = DBWrapper(db_connection)
store = await WalletInterestedStore.create(db_wrapper)
try:
coin_1 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
coin_2 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
assert (await store.get_interested_coin_ids()) == []
await store.add_interested_coin_id(coin_1.name())
assert (await store.get_interested_coin_ids()) == [coin_1.name()]
await store.add_interested_coin_id(coin_1.name())
assert (await store.get_interested_coin_ids()) == [coin_1.name()]
await store.add_interested_coin_id(coin_2.name())
assert set(await store.get_interested_coin_ids()) == {coin_1.name(), coin_2.name()}
puzzle_hash = token_bytes(32)
assert len(await store.get_interested_puzzle_hashes()) == 0
await store.add_interested_puzzle_hash(puzzle_hash, 2)
assert len(await store.get_interested_puzzle_hashes()) == 1
await store.add_interested_puzzle_hash(puzzle_hash, 2)
assert len(await store.get_interested_puzzle_hashes()) == 1
assert (await store.get_interested_puzzle_hash_wallet_id(puzzle_hash)) == 2
await store.add_interested_puzzle_hash(puzzle_hash, 3)
assert len(await store.get_interested_puzzle_hashes()) == 1
assert (await store.get_interested_puzzle_hash_wallet_id(puzzle_hash)) == 3
await store.remove_interested_puzzle_hash(puzzle_hash)
assert (await store.get_interested_puzzle_hash_wallet_id(puzzle_hash)) is None
assert len(await store.get_interested_puzzle_hashes()) == 0
finally:
await db_connection.close()
db_filename.unlink()
| 45.132075 | 95 | 0.695234 |
from pathlib import Path
from secrets import token_bytes
import aiosqlite
import pytest
from chinilla.types.blockchain_format.coin import Coin
from chinilla.util.db_wrapper import DBWrapper
from chinilla.util.ints import uint64
from chinilla.wallet.wallet_interested_store import WalletInterestedStore
class TestWalletInterestedStore:
@pytest.mark.asyncio
async def test_store(self):
db_filename = Path("wallet_interested_store_test.db")
if db_filename.exists():
db_filename.unlink()
db_connection = await aiosqlite.connect(db_filename)
db_wrapper = DBWrapper(db_connection)
store = await WalletInterestedStore.create(db_wrapper)
try:
coin_1 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
coin_2 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
assert (await store.get_interested_coin_ids()) == []
await store.add_interested_coin_id(coin_1.name())
assert (await store.get_interested_coin_ids()) == [coin_1.name()]
await store.add_interested_coin_id(coin_1.name())
assert (await store.get_interested_coin_ids()) == [coin_1.name()]
await store.add_interested_coin_id(coin_2.name())
assert set(await store.get_interested_coin_ids()) == {coin_1.name(), coin_2.name()}
puzzle_hash = token_bytes(32)
assert len(await store.get_interested_puzzle_hashes()) == 0
await store.add_interested_puzzle_hash(puzzle_hash, 2)
assert len(await store.get_interested_puzzle_hashes()) == 1
await store.add_interested_puzzle_hash(puzzle_hash, 2)
assert len(await store.get_interested_puzzle_hashes()) == 1
assert (await store.get_interested_puzzle_hash_wallet_id(puzzle_hash)) == 2
await store.add_interested_puzzle_hash(puzzle_hash, 3)
assert len(await store.get_interested_puzzle_hashes()) == 1
assert (await store.get_interested_puzzle_hash_wallet_id(puzzle_hash)) == 3
await store.remove_interested_puzzle_hash(puzzle_hash)
assert (await store.get_interested_puzzle_hash_wallet_id(puzzle_hash)) is None
assert len(await store.get_interested_puzzle_hashes()) == 0
finally:
await db_connection.close()
db_filename.unlink()
| true | true |
1c4a530ffc5d9d6be0b083b255751a4622ff6ed6 | 645 | py | Python | teal_algos/bubble.py | Taneristique/TEAL | e8860741be02a98b2562d36da46864e7bdc9594b | ["MIT"] | null | null | null | teal_algos/bubble.py | Taneristique/TEAL | e8860741be02a98b2562d36da46864e7bdc9594b | ["MIT"] | null | null | null | teal_algos/bubble.py | Taneristique/TEAL | e8860741be02a98b2562d36da46864e7bdc9594b | ["MIT"] | null | null | null |
import time
start=time.time()
def bubble(x):
"""Function takes list element x as parameter which is consist of numbers"""
    for i in range(len(x)-1): #number of passes over the list
        for j in range(1,len(x)): #number of comparisons per pass
            if x[j-1]>x[j]: #swap x[j] with x[j-1]
chg=x[j]
x[j]=x[j-1]
x[j-1]=chg
if x[j-1]==x[j] or x[j-1]<x[j]: #do nothing if two elements are equal or first element little than second one.
pass
print('step ',i+1 ,x)
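# (Added note: this is the classic O(n^2) bubble sort; a common refinement,
#  not implemented here, is to stop early once a full pass makes no swaps.)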
bubble([29,32,4,11,2,3])
end=time.time()
print(f'Runtime of algorithm : {end-start}')
| 40.3125 | 122 | 0.542636 |
import time
start=time.time()
def bubble(x):
    for i in range(len(x)-1):
        for j in range(1,len(x)):
            if x[j-1]>x[j]:
                chg=x[j]
                x[j]=x[j-1]
                x[j-1]=chg
            if x[j-1]==x[j] or x[j-1]<x[j]:
                pass
print('step ',i+1 ,x)
bubble([29,32,4,11,2,3])
end=time.time()
print(f'Runtime of algorithm : {end-start}')
| true | true |
1c4a5380b287324f5ee930287fd0199210559d6b | 4,852 | py | Python | bin/additem.py | CakeLancelot/UnityPackFF | ee3368b16aec3c6b95c70778105dfcbf7379647f | ["MIT"] | 6 | 2020-11-03T13:23:40.000Z | 2021-10-06T15:25:29.000Z | bin/additem.py | CakeLancelot/UnityPackFF | ee3368b16aec3c6b95c70778105dfcbf7379647f | ["MIT"] | 1 | 2021-02-15T20:16:40.000Z | 2021-02-15T20:16:40.000Z | bin/additem.py | CakeLancelot/UnityPackFF | ee3368b16aec3c6b95c70778105dfcbf7379647f | ["MIT"] | 10 | 2020-11-03T15:08:10.000Z | 2022-02-13T07:32:52.000Z |
#!/usr/bin/env python3
# Adds a (retextured) item into the game. Will need to be modified slightly
# to add items other than armor. Remember to use dumpxdt.py (and make it read
# the generated _new TableData!) so your server allows you to spawn the
# new items. Will also need to be modified to work with girls' or unisex items.
from unitypack.asset import Asset
from unitypack.object import FFOrderedDict
from unitypack.modding import import_texture
# asset bundles
TABLEDATA_PATH = 'CustomAssetBundle-1dca92eecee4742d985b799d8226666d'
CHARTEX_PATH = 'CustomAssetBundle-aa120043d3c634fe9adfb5cbe08e6970'
ICONS_PATH = 'CustomAssetBundle-784fa24bcf2da4f5eabe9547958616eb'
# template items
TEMPL_ITEMID = 152 # changing this is one way to change the base model
TEMPL_TEXTURE_PATHID = 589 # these other two can stay the same
TEMPL_ICON_PATHID = 1000
# new item properties
ITEM_TEXTURE_PATH = 'shirt_davestrider2.png'
ITEM_ICON_PATH = 'shirt_davestrider2_icon.png'
ITEM_NAME = 'Dave Strider Shirt'
ITEM_COMMENT = 'Dave Strider from Homestuck! (I know nothing about this character)'
ITEM_TEXTURE_NAME = 'shirt_davestrider2'
ITEM_TYPE = 'Shirts' # one of Shirts, Pants, Shoes, Hat, Glass, Back, Weapon, Vehicle
ITEM_DEFENSE = 50
def findnexticon(tabledata, typ):
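    # Scan every category's icon table and return one past the highest icon
    # number currently used for the given icon type.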
xdtdata = tabledata.objects[7].contents
categories = ['Shirts', 'Pants', 'Shoes', 'Hat', 'Glass', 'Back', 'Weapon', 'Vehicle']
ret = 1
for cat in categories:
icontable = xdtdata['m_p' + cat + 'ItemTable']['m_pItemIconData']
if icontable[1]['m_iIconType'] == typ:
ret = max(ret, *[x['m_iIconNumber'] for x in icontable])
return ret + 1
def fromtempl(table, src, dst):
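    # Append a fresh entry at index dst, shallow-copying every field from the
    # template entry at index src.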
table.append(FFOrderedDict())
for k, v in table[src].items():
table[dst][k] = v
def mod_tabledata(tabledata):
itemtable = tabledata.objects[7].contents['m_p' + ITEM_TYPE + 'ItemTable']
itemid = len(itemtable['m_pItemData'])
assert len(itemtable['m_pItemData']) == len(itemtable['m_pItemStringData'])
# construct item object
fromtempl(itemtable['m_pItemData'], TEMPL_ITEMID, itemid)
# fix item id
itemtable['m_pItemData'][itemid]['m_iItemNumber'] = itemid
itemtable['m_pItemData'][itemid]['m_iItemName'] = itemid
itemtable['m_pItemData'][itemid]['m_iComment'] = itemid
# configure properties
itemtable['m_pItemData'][itemid]['m_iDefenseRat'] = ITEM_DEFENSE
# ...and any other changes you want
# construct item strings object
fromtempl(itemtable['m_pItemStringData'], TEMPL_ITEMID, itemid)
# set strings
itemtable['m_pItemStringData'][itemid]['m_strName'] = ITEM_NAME
itemtable['m_pItemStringData'][itemid]['m_strComment'] = ITEM_COMMENT
meshid = len(itemtable['m_pItemMeshData'])
templ_meshid = itemtable['m_pItemData'][TEMPL_ITEMID]['m_iMesh']
itemtable['m_pItemData'][itemid]['m_iMesh'] = meshid
# construct item mesh info object
fromtempl(itemtable['m_pItemMeshData'], templ_meshid, meshid)
itemtable['m_pItemMeshData'][meshid]['m_pstrMTextureString'] = ITEM_TEXTURE_NAME
# female texture
# itemtable['m_pItemMeshData'][meshid]['m_pstrFTextureString'] = ITEM_TEXTURE_NAME
iconnum = findnexticon(tabledata, 3)
# construct icon object
iconid = len(itemtable['m_pItemIconData'])
itemtable['m_pItemIconData'].append(FFOrderedDict())
itemtable['m_pItemIconData'][iconid]['m_iIconType'] = 3
itemtable['m_pItemIconData'][iconid]['m_iIconNumber'] = iconnum
itemtable['m_pItemData'][itemid]['m_iIcon'] = iconid
print('added itemid {} to tabledata.\n\tmeshid: {}, iconid: {}, iconum: {}'
.format(itemid, meshid, iconid, iconnum))
return iconnum
def mod_texture(asset, imgpath, load_path, name, templ_pathid, comp='dxt1'):
obj = asset.add_object(28)
import_texture(obj._contents, imgpath, name, comp)
ab_ent = asset.add2ab(load_path, obj.path_id)
print('inserted texture.\n\tpath_id: {}'.format(obj.path_id))
def main():
print('inserting {}...'.format(ITEM_NAME))
print('modding TableData...')
with open(TABLEDATA_PATH, 'rb') as f:
tabledata = Asset.from_file(f)
iconnum = mod_tabledata(tabledata)
with open(TABLEDATA_PATH + '_new', 'wb') as outf:
tabledata.save(outf)
icon_name = 'cosicon_{}'.format(iconnum)
icon_path = 'icons/{}.png'.format(icon_name)
print('icon_name: {}, icon_path: {}'.format(icon_name, icon_path))
print('modding CharTexture...')
with open(CHARTEX_PATH, 'rb') as f:
chartex = Asset.from_file(f)
mod_texture(chartex, ITEM_TEXTURE_PATH, 'texture/' + ITEM_TEXTURE_NAME + '.dds',
ITEM_TEXTURE_NAME, TEMPL_TEXTURE_PATHID)
with open(CHARTEX_PATH + '_new', 'wb') as outf:
chartex.save(outf)
print('modding Icons...')
with open(ICONS_PATH, 'rb') as f:
icons = Asset.from_file(f)
mod_texture(icons, ITEM_ICON_PATH, icon_path, icon_name, TEMPL_ICON_PATHID, 'dxt5')
with open(ICONS_PATH + '_new', 'wb') as outf:
icons.save(outf)
print('done.')
if __name__ == '__main__':
main()
| 32.783784 | 87 | 0.739489 | true | true |
1c4a540b90827e76c2c1079b89be91800a5d28c8 | 126 | py | Python | vit/formatter/wait_epoch.py | kinifwyne/vit | e2cbafce922b1e09c4a66e7dc9592c51fe628e9d | [
"MIT"
] | 179 | 2020-07-28T08:21:51.000Z | 2022-03-30T21:39:37.000Z | vit/formatter/wait_epoch.py | kinifwyne/vit | e2cbafce922b1e09c4a66e7dc9592c51fe628e9d | [
"MIT"
] | 255 | 2017-02-01T11:49:12.000Z | 2020-07-26T22:31:25.000Z | vit/formatter/wait_epoch.py | kinifwyne/vit | e2cbafce922b1e09c4a66e7dc9592c51fe628e9d | [
"MIT"
] | 26 | 2017-01-17T20:31:13.000Z | 2020-06-17T13:09:01.000Z | from vit.formatter.wait import Wait
class WaitEpoch(Wait):
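    # Renders the task's "wait" date as a Unix epoch timestamp, delegating to
    # the epoch() helper provided by the base Wait formatter.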
def format(self, wait, task):
return self.epoch(wait)
| 21 | 35 | 0.698413 | true | true |
1c4a549672ab2b68bbe9ad8488637c7d44891b43 | 2,819 | py | Python | test/lint/check-doc.py | VaderCoinProject/vadercoin | b513c794b014d40e5aad281dd1f54845c46d216c | [
"MIT"
] | null | null | null | test/lint/check-doc.py | VaderCoinProject/vadercoin | b513c794b014d40e5aad281dd1f54845c46d216c | [
"MIT"
] | null | null | null | test/lint/check-doc.py | VaderCoinProject/vadercoin | b513c794b014d40e5aad281dd1f54845c46d216c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Vadercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
REGEX_ARG = r'(?:ForceSet|SoftSet|Get|Is)(?:Bool)?Args?(?:Set)?\("(-[^"]+)"'
REGEX_DOC = r'AddArg\("(-[^"=]+?)(?:=|")'
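# REGEX_ARG captures option names at lookup sites (GetArg/GetBoolArg/IsArgSet/...),
# while REGEX_DOC captures option names registered for documentation via AddArg(...).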
CMD_ROOT_DIR = '$(git rev-parse --show-toplevel)/{}'.format(FOLDER_GREP)
CMD_GREP_ARGS = r"git grep --perl-regexp '{}' -- {} ':(exclude){}'".format(REGEX_ARG, CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_WALLET_ARGS = r"git grep --function-context 'void WalletInit::AddWalletOptions' -- {} | grep AddArg".format(CMD_ROOT_DIR)
CMD_GREP_WALLET_HIDDEN_ARGS = r"git grep --function-context 'void DummyWalletInit::AddWalletOptions' -- {}".format(CMD_ROOT_DIR)
CMD_GREP_DOCS = r"git grep --perl-regexp '{}' {}".format(REGEX_DOC, CMD_ROOT_DIR)
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-h', '-help', '-dbcrashratio', '-forcecompactdb', '-zapwallettxes'])
def lint_missing_argument_documentation():
used = check_output(CMD_GREP_ARGS, shell=True).decode('utf8').strip()
docd = check_output(CMD_GREP_DOCS, shell=True).decode('utf8').strip()
args_used = set(re.findall(re.compile(REGEX_ARG), used))
args_docd = set(re.findall(re.compile(REGEX_DOC), docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print("Args used : {}".format(len(args_used)))
print("Args documented : {}".format(len(args_docd)))
print("Args undocumented: {}".format(len(args_need_doc)))
print(args_need_doc)
print("Args unknown : {}".format(len(args_unknown)))
print(args_unknown)
assert 0 == len(args_need_doc), "Please document the following arguments: {}".format(args_need_doc)
def lint_missing_hidden_wallet_args():
wallet_args = check_output(CMD_GREP_WALLET_ARGS, shell=True).decode('utf8').strip()
wallet_hidden_args = check_output(CMD_GREP_WALLET_HIDDEN_ARGS, shell=True).decode('utf8').strip()
wallet_args = set(re.findall(re.compile(REGEX_DOC), wallet_args))
wallet_hidden_args = set(re.findall(re.compile(r' "([^"=]+)'), wallet_hidden_args))
hidden_missing = wallet_args.difference(wallet_hidden_args)
if hidden_missing:
assert 0, "Please add {} to the hidden args in DummyWalletInit::AddWalletOptions".format(hidden_missing)
def main():
lint_missing_argument_documentation()
lint_missing_hidden_wallet_args()
if __name__ == "__main__":
main()
| 42.074627 | 130 | 0.723306 | true | true |
1c4a564f9a4cae704bf503183576b795d60fbbf4 | 142 | py | Python | playrcc/src/gui/__init__.py | Gloryness/playrcc | 3816a935f19c786db59ba5a46a98cc527053cc29 | [
"MIT"
] | 4 | 2020-09-24T14:25:01.000Z | 2020-11-02T22:18:12.000Z | playrcc/src/gui/__init__.py | Gloryness/playrcc | 3816a935f19c786db59ba5a46a98cc527053cc29 | [
"MIT"
] | null | null | null | playrcc/src/gui/__init__.py | Gloryness/playrcc | 3816a935f19c786db59ba5a46a98cc527053cc29 | [
"MIT"
] | null | null | null | from .mainwindow import SecretCodeWindow
__title__ = 'gui'
__author__ = 'Gloryness'
__license__ = 'MIT License'
__all__ = ['SecretCodeWindow'] | 23.666667 | 40 | 0.774648 | true | true |
1c4a5693a9652bf3ef6a37798165c3bbc52518da | 4,605 | py | Python | melodic/src/ros_comm/rospy/src/rospy/__init__.py | disorn-inc/ROS-melodic-python3-Opencv-4.1.1-CUDA | 3d265bb64712e3cd7dfa0ad56d78fcdebafdb4b0 | [
"BSD-3-Clause"
] | 2 | 2021-07-14T12:33:55.000Z | 2021-11-21T07:14:13.000Z | melodic/src/ros_comm/rospy/src/rospy/__init__.py | disorn-inc/ROS-melodic-python3-Opencv-4.1.1-CUDA | 3d265bb64712e3cd7dfa0ad56d78fcdebafdb4b0 | [
"BSD-3-Clause"
] | null | null | null | melodic/src/ros_comm/rospy/src/rospy/__init__.py | disorn-inc/ROS-melodic-python3-Opencv-4.1.1-CUDA | 3d265bb64712e3cd7dfa0ad56d78fcdebafdb4b0 | [
"BSD-3-Clause"
] | null | null | null | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2008, Willow Garage, Inc.
# Revision $Id$
"""
ROS client library for Python.
See U{http://ros.org/wiki/rospy}
@author: Ken Conley (kwc)
"""
# import symbols into rospy namespace
# NOTE: there are much better ways to configure python module
# dictionaries, but the rospy codebase isn't quite in shape for that
# yet
from std_msgs.msg import Header
from .client import spin, myargv, init_node, \
get_published_topics, \
wait_for_message, \
get_master, \
on_shutdown, \
get_param, get_param_cached, get_param_names, set_param, delete_param, has_param, search_param,\
DEBUG, INFO, WARN, ERROR, FATAL
from .timer import sleep, Rate, Timer
from .core import is_shutdown, signal_shutdown, \
get_node_uri, get_ros_root, \
logdebug, logwarn, loginfo, logout, logerr, logfatal, \
logdebug_throttle, logwarn_throttle, loginfo_throttle, logerr_throttle, logfatal_throttle, \
logdebug_throttle_identical, logwarn_throttle_identical, loginfo_throttle_identical, logerr_throttle_identical, logfatal_throttle_identical, \
logdebug_once, logwarn_once, loginfo_once, logerr_once, logfatal_once, \
parse_rosrpc_uri
from .exceptions import *
from .msg import AnyMsg
from .msproxy import MasterProxy
from .names import get_name, get_caller_id, get_namespace, resolve_name, remap_name
from .rostime import Time, Duration, get_rostime, get_time
from .service import ServiceException
# - use tcp ros implementation of services
from .impl.tcpros_service import Service, ServiceProxy, wait_for_service
from .topics import Message, SubscribeListener, Publisher, Subscriber
## \defgroup validators Validators
## \defgroup clientapi Client API
__all__ = [
'Header',
'spin',
'myargv',
'init_node',
'get_master',
'get_published_topics',
'wait_for_service',
'on_shutdown',
'get_param',
'get_param_cached',
'get_param_names',
'set_param',
'delete_param',
'has_param',
'search_param',
'sleep',
'Rate',
'DEBUG',
'INFO',
'WARN',
'ERROR',
'FATAL',
'is_shutdown',
'signal_shutdown',
'get_node_uri',
'get_ros_root',
'logdebug',
'logwarn', 'loginfo',
'logout', 'logerr', 'logfatal',
'logdebug_throttle',
'logwarn_throttle', 'loginfo_throttle',
'logerr_throttle', 'logfatal_throttle',
'logdebug_once',
'logwarn_once', 'loginfo_once',
'logerr_once', 'logfatal_once',
'parse_rosrpc_uri',
'MasterProxy',
'NodeProxy',
'ROSException',
'ROSSerializationException',
'ROSInitException',
'ROSInterruptException',
'ROSInternalException',
'TransportException',
'TransportTerminated',
'TransportInitError',
'AnyMsg', 'Message',
'get_name',
'get_caller_id',
'get_namespace',
'resolve_name',
'remap_name',
'Time', 'Duration', 'get_rostime', 'get_time',
'ServiceException',
'Service', 'ServiceProxy',
'SubscribeListener', 'Publisher', 'Subscriber',
]
| 33.860294 | 146 | 0.728339 | true | true |
1c4a56aeca55d753088ec0ac5ef51be958e3e1da | 823 | py | Python | packages/__init__.py | fetchai/agents-yoti | d71d57508079e5cd3854037bc3c473e24915af6f | [
"Apache-2.0"
] | 4 | 2021-01-19T17:53:58.000Z | 2021-09-08T05:28:58.000Z | packages/__init__.py | fetchai/agents-yoti | d71d57508079e5cd3854037bc3c473e24915af6f | [
"Apache-2.0"
] | null | null | null | packages/__init__.py | fetchai/agents-yoti | d71d57508079e5cd3854037bc3c473e24915af6f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""AEA packages folder."""
| 39.190476 | 80 | 0.575942 | true | true |
|
1c4a571b018141a2957d2b7a89a4c12fd814a302 | 286 | py | Python | frappe/patches/v12_0/remove_deprecated_fields_from_doctype.py | jimmyrianto/frappe | 40051410436b11e0415c8c8f0a8335bdd572ce6e | [
"MIT"
] | 5 | 2017-09-12T15:56:31.000Z | 2022-03-09T13:50:21.000Z | frappe/patches/v12_0/remove_deprecated_fields_from_doctype.py | alexbow2008/frappe | ce592a40b4c5e80a9c6cbdc541105218bf98c966 | [
"MIT"
] | 212 | 2017-08-16T13:03:18.000Z | 2020-10-06T12:26:21.000Z | frappe/patches/v12_0/remove_deprecated_fields_from_doctype.py | alexbow2008/frappe | ce592a40b4c5e80a9c6cbdc541105218bf98c966 | [
"MIT"
] | 14 | 2020-11-04T11:22:44.000Z | 2022-02-01T20:59:37.000Z | import frappe
def execute():
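    # Drop the long-deprecated DocType columns, then remove the stale Property
    # Setter rows left behind for the removed read_only_onload property.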
frappe.reload_doc('core', 'doctype', 'doctype')
frappe.model.delete_fields({
'DocType': ['hide_heading', 'image_view', 'read_only_onload']
}, delete=1)
frappe.db.sql('''
DELETE from `tabProperty Setter`
WHERE property = 'read_only_onload'
''')
| 22 | 63 | 0.695804 | true | true |
1c4a57858093aea769da74e5245781bdca0980dc | 44,092 | py | Python | sdk/python/pulumi_azure_native/network/v20190501/_inputs.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_native/network/v20190501/_inputs.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_native/network/v20190501/_inputs.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'BackendArgs',
'BackendPoolArgs',
'BackendPoolsSettingsArgs',
'CacheConfigurationArgs',
'ForwardingConfigurationArgs',
'FrontendEndpointArgs',
'FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLinkArgs',
'HealthProbeSettingsModelArgs',
'LoadBalancingSettingsModelArgs',
'RedirectConfigurationArgs',
'RoutingRuleArgs',
'SubResourceArgs',
]
@pulumi.input_type
class BackendArgs:
def __init__(__self__, *,
address: Optional[pulumi.Input[str]] = None,
backend_host_header: Optional[pulumi.Input[str]] = None,
enabled_state: Optional[pulumi.Input[Union[str, 'BackendEnabledState']]] = None,
http_port: Optional[pulumi.Input[int]] = None,
https_port: Optional[pulumi.Input[int]] = None,
priority: Optional[pulumi.Input[int]] = None,
weight: Optional[pulumi.Input[int]] = None):
"""
Backend address of a frontDoor load balancer.
:param pulumi.Input[str] address: Location of the backend (IP address or FQDN)
:param pulumi.Input[str] backend_host_header: The value to use as the host header sent to the backend. If blank or unspecified, this defaults to the incoming host.
:param pulumi.Input[Union[str, 'BackendEnabledState']] enabled_state: Whether to enable use of this backend. Permitted values are 'Enabled' or 'Disabled'
:param pulumi.Input[int] http_port: The HTTP TCP port number. Must be between 1 and 65535.
:param pulumi.Input[int] https_port: The HTTPS TCP port number. Must be between 1 and 65535.
:param pulumi.Input[int] priority: Priority to use for load balancing. Higher priorities will not be used for load balancing if any lower priority backend is healthy.
:param pulumi.Input[int] weight: Weight of this endpoint for load balancing purposes.
"""
if address is not None:
pulumi.set(__self__, "address", address)
if backend_host_header is not None:
pulumi.set(__self__, "backend_host_header", backend_host_header)
if enabled_state is not None:
pulumi.set(__self__, "enabled_state", enabled_state)
if http_port is not None:
pulumi.set(__self__, "http_port", http_port)
if https_port is not None:
pulumi.set(__self__, "https_port", https_port)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if weight is not None:
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter
def address(self) -> Optional[pulumi.Input[str]]:
"""
Location of the backend (IP address or FQDN)
"""
return pulumi.get(self, "address")
@address.setter
def address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "address", value)
@property
@pulumi.getter(name="backendHostHeader")
def backend_host_header(self) -> Optional[pulumi.Input[str]]:
"""
The value to use as the host header sent to the backend. If blank or unspecified, this defaults to the incoming host.
"""
return pulumi.get(self, "backend_host_header")
@backend_host_header.setter
def backend_host_header(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backend_host_header", value)
@property
@pulumi.getter(name="enabledState")
def enabled_state(self) -> Optional[pulumi.Input[Union[str, 'BackendEnabledState']]]:
"""
Whether to enable use of this backend. Permitted values are 'Enabled' or 'Disabled'
"""
return pulumi.get(self, "enabled_state")
@enabled_state.setter
def enabled_state(self, value: Optional[pulumi.Input[Union[str, 'BackendEnabledState']]]):
pulumi.set(self, "enabled_state", value)
@property
@pulumi.getter(name="httpPort")
def http_port(self) -> Optional[pulumi.Input[int]]:
"""
The HTTP TCP port number. Must be between 1 and 65535.
"""
return pulumi.get(self, "http_port")
@http_port.setter
def http_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "http_port", value)
@property
@pulumi.getter(name="httpsPort")
def https_port(self) -> Optional[pulumi.Input[int]]:
"""
The HTTPS TCP port number. Must be between 1 and 65535.
"""
return pulumi.get(self, "https_port")
@https_port.setter
def https_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "https_port", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[int]]:
"""
Priority to use for load balancing. Higher priorities will not be used for load balancing if any lower priority backend is healthy.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter
def weight(self) -> Optional[pulumi.Input[int]]:
"""
Weight of this endpoint for load balancing purposes.
"""
return pulumi.get(self, "weight")
@weight.setter
def weight(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "weight", value)
@pulumi.input_type
class BackendPoolArgs:
def __init__(__self__, *,
backends: Optional[pulumi.Input[Sequence[pulumi.Input['BackendArgs']]]] = None,
health_probe_settings: Optional[pulumi.Input['SubResourceArgs']] = None,
id: Optional[pulumi.Input[str]] = None,
load_balancing_settings: Optional[pulumi.Input['SubResourceArgs']] = None,
name: Optional[pulumi.Input[str]] = None):
"""
A backend pool is a collection of backends that can be routed to.
:param pulumi.Input[Sequence[pulumi.Input['BackendArgs']]] backends: The set of backends for this pool
:param pulumi.Input['SubResourceArgs'] health_probe_settings: L7 health probe settings for a backend pool
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input['SubResourceArgs'] load_balancing_settings: Load balancing settings for a backend pool
:param pulumi.Input[str] name: Resource name.
"""
if backends is not None:
pulumi.set(__self__, "backends", backends)
if health_probe_settings is not None:
pulumi.set(__self__, "health_probe_settings", health_probe_settings)
if id is not None:
pulumi.set(__self__, "id", id)
if load_balancing_settings is not None:
pulumi.set(__self__, "load_balancing_settings", load_balancing_settings)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def backends(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackendArgs']]]]:
"""
The set of backends for this pool
"""
return pulumi.get(self, "backends")
@backends.setter
def backends(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackendArgs']]]]):
pulumi.set(self, "backends", value)
@property
@pulumi.getter(name="healthProbeSettings")
def health_probe_settings(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
L7 health probe settings for a backend pool
"""
return pulumi.get(self, "health_probe_settings")
@health_probe_settings.setter
def health_probe_settings(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "health_probe_settings", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="loadBalancingSettings")
def load_balancing_settings(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
Load balancing settings for a backend pool
"""
return pulumi.get(self, "load_balancing_settings")
@load_balancing_settings.setter
def load_balancing_settings(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "load_balancing_settings", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class BackendPoolsSettingsArgs:
def __init__(__self__, *,
enforce_certificate_name_check: Optional[pulumi.Input[Union[str, 'EnforceCertificateNameCheckEnabledState']]] = None,
send_recv_timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Settings that apply to all backend pools.
:param pulumi.Input[Union[str, 'EnforceCertificateNameCheckEnabledState']] enforce_certificate_name_check: Whether to enforce certificate name check on HTTPS requests to all backend pools. No effect on non-HTTPS requests.
:param pulumi.Input[int] send_recv_timeout_seconds: Send and receive timeout on forwarding request to the backend. When timeout is reached, the request fails and returns.
"""
if enforce_certificate_name_check is None:
enforce_certificate_name_check = 'Enabled'
if enforce_certificate_name_check is not None:
pulumi.set(__self__, "enforce_certificate_name_check", enforce_certificate_name_check)
if send_recv_timeout_seconds is not None:
pulumi.set(__self__, "send_recv_timeout_seconds", send_recv_timeout_seconds)
@property
@pulumi.getter(name="enforceCertificateNameCheck")
def enforce_certificate_name_check(self) -> Optional[pulumi.Input[Union[str, 'EnforceCertificateNameCheckEnabledState']]]:
"""
Whether to enforce certificate name check on HTTPS requests to all backend pools. No effect on non-HTTPS requests.
"""
return pulumi.get(self, "enforce_certificate_name_check")
@enforce_certificate_name_check.setter
def enforce_certificate_name_check(self, value: Optional[pulumi.Input[Union[str, 'EnforceCertificateNameCheckEnabledState']]]):
pulumi.set(self, "enforce_certificate_name_check", value)
@property
@pulumi.getter(name="sendRecvTimeoutSeconds")
def send_recv_timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Send and receive timeout on forwarding request to the backend. When timeout is reached, the request fails and returns.
"""
return pulumi.get(self, "send_recv_timeout_seconds")
@send_recv_timeout_seconds.setter
def send_recv_timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "send_recv_timeout_seconds", value)
@pulumi.input_type
class CacheConfigurationArgs:
def __init__(__self__, *,
dynamic_compression: Optional[pulumi.Input[Union[str, 'DynamicCompressionEnabled']]] = None,
query_parameter_strip_directive: Optional[pulumi.Input[Union[str, 'FrontDoorQuery']]] = None):
"""
Caching settings for a caching-type route. To disable caching, do not provide a cacheConfiguration object.
:param pulumi.Input[Union[str, 'DynamicCompressionEnabled']] dynamic_compression: Whether to use dynamic compression for cached content
:param pulumi.Input[Union[str, 'FrontDoorQuery']] query_parameter_strip_directive: Treatment of URL query terms when forming the cache key.
"""
if dynamic_compression is not None:
pulumi.set(__self__, "dynamic_compression", dynamic_compression)
if query_parameter_strip_directive is not None:
pulumi.set(__self__, "query_parameter_strip_directive", query_parameter_strip_directive)
@property
@pulumi.getter(name="dynamicCompression")
def dynamic_compression(self) -> Optional[pulumi.Input[Union[str, 'DynamicCompressionEnabled']]]:
"""
Whether to use dynamic compression for cached content
"""
return pulumi.get(self, "dynamic_compression")
@dynamic_compression.setter
def dynamic_compression(self, value: Optional[pulumi.Input[Union[str, 'DynamicCompressionEnabled']]]):
pulumi.set(self, "dynamic_compression", value)
@property
@pulumi.getter(name="queryParameterStripDirective")
def query_parameter_strip_directive(self) -> Optional[pulumi.Input[Union[str, 'FrontDoorQuery']]]:
"""
Treatment of URL query terms when forming the cache key.
"""
return pulumi.get(self, "query_parameter_strip_directive")
@query_parameter_strip_directive.setter
def query_parameter_strip_directive(self, value: Optional[pulumi.Input[Union[str, 'FrontDoorQuery']]]):
pulumi.set(self, "query_parameter_strip_directive", value)
@pulumi.input_type
class ForwardingConfigurationArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
backend_pool: Optional[pulumi.Input['SubResourceArgs']] = None,
cache_configuration: Optional[pulumi.Input['CacheConfigurationArgs']] = None,
custom_forwarding_path: Optional[pulumi.Input[str]] = None,
forwarding_protocol: Optional[pulumi.Input[Union[str, 'FrontDoorForwardingProtocol']]] = None):
"""
Describes Forwarding Route.
:param pulumi.Input[str] odata_type:
Expected value is '#Microsoft.Azure.FrontDoor.Models.FrontdoorForwardingConfiguration'.
:param pulumi.Input['SubResourceArgs'] backend_pool: A reference to the BackendPool which this rule routes to.
:param pulumi.Input['CacheConfigurationArgs'] cache_configuration: The caching configuration associated with this rule.
:param pulumi.Input[str] custom_forwarding_path: A custom path used to rewrite resource paths matched by this rule. Leave empty to use incoming path.
:param pulumi.Input[Union[str, 'FrontDoorForwardingProtocol']] forwarding_protocol: Protocol this rule will use when forwarding traffic to backends.
"""
pulumi.set(__self__, "odata_type", '#Microsoft.Azure.FrontDoor.Models.FrontdoorForwardingConfiguration')
if backend_pool is not None:
pulumi.set(__self__, "backend_pool", backend_pool)
if cache_configuration is not None:
pulumi.set(__self__, "cache_configuration", cache_configuration)
if custom_forwarding_path is not None:
pulumi.set(__self__, "custom_forwarding_path", custom_forwarding_path)
if forwarding_protocol is not None:
pulumi.set(__self__, "forwarding_protocol", forwarding_protocol)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
Expected value is '#Microsoft.Azure.FrontDoor.Models.FrontdoorForwardingConfiguration'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="backendPool")
def backend_pool(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
A reference to the BackendPool which this rule routes to.
"""
return pulumi.get(self, "backend_pool")
@backend_pool.setter
def backend_pool(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "backend_pool", value)
@property
@pulumi.getter(name="cacheConfiguration")
def cache_configuration(self) -> Optional[pulumi.Input['CacheConfigurationArgs']]:
"""
The caching configuration associated with this rule.
"""
return pulumi.get(self, "cache_configuration")
@cache_configuration.setter
def cache_configuration(self, value: Optional[pulumi.Input['CacheConfigurationArgs']]):
pulumi.set(self, "cache_configuration", value)
@property
@pulumi.getter(name="customForwardingPath")
def custom_forwarding_path(self) -> Optional[pulumi.Input[str]]:
"""
A custom path used to rewrite resource paths matched by this rule. Leave empty to use incoming path.
"""
return pulumi.get(self, "custom_forwarding_path")
@custom_forwarding_path.setter
def custom_forwarding_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_forwarding_path", value)
@property
@pulumi.getter(name="forwardingProtocol")
def forwarding_protocol(self) -> Optional[pulumi.Input[Union[str, 'FrontDoorForwardingProtocol']]]:
"""
Protocol this rule will use when forwarding traffic to backends.
"""
return pulumi.get(self, "forwarding_protocol")
@forwarding_protocol.setter
def forwarding_protocol(self, value: Optional[pulumi.Input[Union[str, 'FrontDoorForwardingProtocol']]]):
pulumi.set(self, "forwarding_protocol", value)
@pulumi.input_type
class FrontendEndpointArgs:
def __init__(__self__, *,
host_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
session_affinity_enabled_state: Optional[pulumi.Input[Union[str, 'SessionAffinityEnabledState']]] = None,
session_affinity_ttl_seconds: Optional[pulumi.Input[int]] = None,
web_application_firewall_policy_link: Optional[pulumi.Input['FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLinkArgs']] = None):
"""
A frontend endpoint used for routing.
:param pulumi.Input[str] host_name: The host name of the frontendEndpoint. Must be a domain name.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: Resource name.
:param pulumi.Input[Union[str, 'SessionAffinityEnabledState']] session_affinity_enabled_state: Whether to allow session affinity on this host. Valid options are 'Enabled' or 'Disabled'
:param pulumi.Input[int] session_affinity_ttl_seconds: UNUSED. This field will be ignored. The TTL to use in seconds for session affinity, if applicable.
:param pulumi.Input['FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLinkArgs'] web_application_firewall_policy_link: Defines the Web Application Firewall policy for each host (if applicable)
"""
if host_name is not None:
pulumi.set(__self__, "host_name", host_name)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if session_affinity_enabled_state is not None:
pulumi.set(__self__, "session_affinity_enabled_state", session_affinity_enabled_state)
if session_affinity_ttl_seconds is not None:
pulumi.set(__self__, "session_affinity_ttl_seconds", session_affinity_ttl_seconds)
if web_application_firewall_policy_link is not None:
pulumi.set(__self__, "web_application_firewall_policy_link", web_application_firewall_policy_link)
@property
@pulumi.getter(name="hostName")
def host_name(self) -> Optional[pulumi.Input[str]]:
"""
The host name of the frontendEndpoint. Must be a domain name.
"""
return pulumi.get(self, "host_name")
@host_name.setter
def host_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_name", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="sessionAffinityEnabledState")
def session_affinity_enabled_state(self) -> Optional[pulumi.Input[Union[str, 'SessionAffinityEnabledState']]]:
"""
Whether to allow session affinity on this host. Valid options are 'Enabled' or 'Disabled'
"""
return pulumi.get(self, "session_affinity_enabled_state")
@session_affinity_enabled_state.setter
def session_affinity_enabled_state(self, value: Optional[pulumi.Input[Union[str, 'SessionAffinityEnabledState']]]):
pulumi.set(self, "session_affinity_enabled_state", value)
@property
@pulumi.getter(name="sessionAffinityTtlSeconds")
def session_affinity_ttl_seconds(self) -> Optional[pulumi.Input[int]]:
"""
UNUSED. This field will be ignored. The TTL to use in seconds for session affinity, if applicable.
"""
return pulumi.get(self, "session_affinity_ttl_seconds")
@session_affinity_ttl_seconds.setter
def session_affinity_ttl_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "session_affinity_ttl_seconds", value)
@property
@pulumi.getter(name="webApplicationFirewallPolicyLink")
def web_application_firewall_policy_link(self) -> Optional[pulumi.Input['FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLinkArgs']]:
"""
Defines the Web Application Firewall policy for each host (if applicable)
"""
return pulumi.get(self, "web_application_firewall_policy_link")
@web_application_firewall_policy_link.setter
def web_application_firewall_policy_link(self, value: Optional[pulumi.Input['FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLinkArgs']]):
pulumi.set(self, "web_application_firewall_policy_link", value)
@pulumi.input_type
class FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLinkArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None):
"""
Defines the Web Application Firewall policy for each host (if applicable)
:param pulumi.Input[str] id: Resource ID.
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@pulumi.input_type
class HealthProbeSettingsModelArgs:
def __init__(__self__, *,
enabled_state: Optional[pulumi.Input[Union[str, 'HealthProbeEnabled']]] = None,
health_probe_method: Optional[pulumi.Input[Union[str, 'FrontDoorHealthProbeMethod']]] = None,
id: Optional[pulumi.Input[str]] = None,
interval_in_seconds: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
path: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[Union[str, 'FrontDoorProtocol']]] = None):
"""
Load balancing settings for a backend pool
:param pulumi.Input[Union[str, 'HealthProbeEnabled']] enabled_state: Whether to enable health probes to be made against backends defined under backendPools. Health probes can only be disabled if there is a single enabled backend in single enabled backend pool.
:param pulumi.Input[Union[str, 'FrontDoorHealthProbeMethod']] health_probe_method: Configures which HTTP method to use to probe the backends defined under backendPools.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[int] interval_in_seconds: The number of seconds between health probes.
:param pulumi.Input[str] name: Resource name.
:param pulumi.Input[str] path: The path to use for the health probe. Default is /
:param pulumi.Input[Union[str, 'FrontDoorProtocol']] protocol: Protocol scheme to use for this probe
"""
if enabled_state is not None:
pulumi.set(__self__, "enabled_state", enabled_state)
if health_probe_method is None:
health_probe_method = 'HEAD'
if health_probe_method is not None:
pulumi.set(__self__, "health_probe_method", health_probe_method)
if id is not None:
pulumi.set(__self__, "id", id)
if interval_in_seconds is not None:
pulumi.set(__self__, "interval_in_seconds", interval_in_seconds)
if name is not None:
pulumi.set(__self__, "name", name)
if path is not None:
pulumi.set(__self__, "path", path)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
@property
@pulumi.getter(name="enabledState")
def enabled_state(self) -> Optional[pulumi.Input[Union[str, 'HealthProbeEnabled']]]:
"""
Whether to enable health probes to be made against backends defined under backendPools. Health probes can only be disabled if there is a single enabled backend in single enabled backend pool.
"""
return pulumi.get(self, "enabled_state")
@enabled_state.setter
def enabled_state(self, value: Optional[pulumi.Input[Union[str, 'HealthProbeEnabled']]]):
pulumi.set(self, "enabled_state", value)
@property
@pulumi.getter(name="healthProbeMethod")
def health_probe_method(self) -> Optional[pulumi.Input[Union[str, 'FrontDoorHealthProbeMethod']]]:
"""
Configures which HTTP method to use to probe the backends defined under backendPools.
"""
return pulumi.get(self, "health_probe_method")
@health_probe_method.setter
def health_probe_method(self, value: Optional[pulumi.Input[Union[str, 'FrontDoorHealthProbeMethod']]]):
pulumi.set(self, "health_probe_method", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="intervalInSeconds")
def interval_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
The number of seconds between health probes.
"""
return pulumi.get(self, "interval_in_seconds")
@interval_in_seconds.setter
def interval_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "interval_in_seconds", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
The path to use for the health probe. Default is /
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[Union[str, 'FrontDoorProtocol']]]:
"""
Protocol scheme to use for this probe
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[Union[str, 'FrontDoorProtocol']]]):
pulumi.set(self, "protocol", value)
@pulumi.input_type
class LoadBalancingSettingsModelArgs:
def __init__(__self__, *,
additional_latency_milliseconds: Optional[pulumi.Input[int]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
sample_size: Optional[pulumi.Input[int]] = None,
successful_samples_required: Optional[pulumi.Input[int]] = None):
"""
Load balancing settings for a backend pool
:param pulumi.Input[int] additional_latency_milliseconds: The additional latency in milliseconds for probes to fall into the lowest latency bucket
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: Resource name.
:param pulumi.Input[int] sample_size: The number of samples to consider for load balancing decisions
:param pulumi.Input[int] successful_samples_required: The number of samples within the sample period that must succeed
"""
if additional_latency_milliseconds is not None:
pulumi.set(__self__, "additional_latency_milliseconds", additional_latency_milliseconds)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if sample_size is not None:
pulumi.set(__self__, "sample_size", sample_size)
if successful_samples_required is not None:
pulumi.set(__self__, "successful_samples_required", successful_samples_required)
@property
@pulumi.getter(name="additionalLatencyMilliseconds")
def additional_latency_milliseconds(self) -> Optional[pulumi.Input[int]]:
"""
The additional latency in milliseconds for probes to fall into the lowest latency bucket
"""
return pulumi.get(self, "additional_latency_milliseconds")
@additional_latency_milliseconds.setter
def additional_latency_milliseconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "additional_latency_milliseconds", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="sampleSize")
def sample_size(self) -> Optional[pulumi.Input[int]]:
"""
The number of samples to consider for load balancing decisions
"""
return pulumi.get(self, "sample_size")
@sample_size.setter
def sample_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "sample_size", value)
@property
@pulumi.getter(name="successfulSamplesRequired")
def successful_samples_required(self) -> Optional[pulumi.Input[int]]:
"""
The number of samples within the sample period that must succeed
"""
return pulumi.get(self, "successful_samples_required")
@successful_samples_required.setter
def successful_samples_required(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "successful_samples_required", value)
@pulumi.input_type
class RedirectConfigurationArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
custom_fragment: Optional[pulumi.Input[str]] = None,
custom_host: Optional[pulumi.Input[str]] = None,
custom_path: Optional[pulumi.Input[str]] = None,
custom_query_string: Optional[pulumi.Input[str]] = None,
redirect_protocol: Optional[pulumi.Input[Union[str, 'FrontDoorRedirectProtocol']]] = None,
redirect_type: Optional[pulumi.Input[Union[str, 'FrontDoorRedirectType']]] = None):
"""
Describes Redirect Route.
:param pulumi.Input[str] odata_type:
Expected value is '#Microsoft.Azure.FrontDoor.Models.FrontdoorRedirectConfiguration'.
:param pulumi.Input[str] custom_fragment: Fragment to add to the redirect URL. Fragment is the part of the URL that comes after #. Do not include the #.
:param pulumi.Input[str] custom_host: Host to redirect. Leave empty to use the incoming host as the destination host.
:param pulumi.Input[str] custom_path: The full path to redirect. Path cannot be empty and must start with /. Leave empty to use the incoming path as destination path.
:param pulumi.Input[str] custom_query_string: The set of query strings to be placed in the redirect URL. Setting this value would replace any existing query string; leave empty to preserve the incoming query string. Query string must be in <key>=<value> format. The first ? and & will be added automatically so do not include them in the front, but do separate multiple query strings with &.
:param pulumi.Input[Union[str, 'FrontDoorRedirectProtocol']] redirect_protocol: The protocol of the destination to where the traffic is redirected
:param pulumi.Input[Union[str, 'FrontDoorRedirectType']] redirect_type: The redirect type the rule will use when redirecting traffic.
"""
pulumi.set(__self__, "odata_type", '#Microsoft.Azure.FrontDoor.Models.FrontdoorRedirectConfiguration')
if custom_fragment is not None:
pulumi.set(__self__, "custom_fragment", custom_fragment)
if custom_host is not None:
pulumi.set(__self__, "custom_host", custom_host)
if custom_path is not None:
pulumi.set(__self__, "custom_path", custom_path)
if custom_query_string is not None:
pulumi.set(__self__, "custom_query_string", custom_query_string)
if redirect_protocol is not None:
pulumi.set(__self__, "redirect_protocol", redirect_protocol)
if redirect_type is not None:
pulumi.set(__self__, "redirect_type", redirect_type)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
Expected value is '#Microsoft.Azure.FrontDoor.Models.FrontdoorRedirectConfiguration'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="customFragment")
def custom_fragment(self) -> Optional[pulumi.Input[str]]:
"""
Fragment to add to the redirect URL. Fragment is the part of the URL that comes after #. Do not include the #.
"""
return pulumi.get(self, "custom_fragment")
@custom_fragment.setter
def custom_fragment(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_fragment", value)
@property
@pulumi.getter(name="customHost")
def custom_host(self) -> Optional[pulumi.Input[str]]:
"""
Host to redirect. Leave empty to use the incoming host as the destination host.
"""
return pulumi.get(self, "custom_host")
@custom_host.setter
def custom_host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_host", value)
@property
@pulumi.getter(name="customPath")
def custom_path(self) -> Optional[pulumi.Input[str]]:
"""
The full path to redirect. Path cannot be empty and must start with /. Leave empty to use the incoming path as destination path.
"""
return pulumi.get(self, "custom_path")
@custom_path.setter
def custom_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_path", value)
@property
@pulumi.getter(name="customQueryString")
def custom_query_string(self) -> Optional[pulumi.Input[str]]:
"""
The set of query strings to be placed in the redirect URL. Setting this value would replace any existing query string; leave empty to preserve the incoming query string. Query string must be in <key>=<value> format. The first ? and & will be added automatically so do not include them in the front, but do separate multiple query strings with &.
"""
return pulumi.get(self, "custom_query_string")
@custom_query_string.setter
def custom_query_string(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_query_string", value)
@property
@pulumi.getter(name="redirectProtocol")
def redirect_protocol(self) -> Optional[pulumi.Input[Union[str, 'FrontDoorRedirectProtocol']]]:
"""
The protocol of the destination to where the traffic is redirected
"""
return pulumi.get(self, "redirect_protocol")
@redirect_protocol.setter
def redirect_protocol(self, value: Optional[pulumi.Input[Union[str, 'FrontDoorRedirectProtocol']]]):
pulumi.set(self, "redirect_protocol", value)
@property
@pulumi.getter(name="redirectType")
def redirect_type(self) -> Optional[pulumi.Input[Union[str, 'FrontDoorRedirectType']]]:
"""
The redirect type the rule will use when redirecting traffic.
"""
return pulumi.get(self, "redirect_type")
@redirect_type.setter
def redirect_type(self, value: Optional[pulumi.Input[Union[str, 'FrontDoorRedirectType']]]):
pulumi.set(self, "redirect_type", value)
@pulumi.input_type
class RoutingRuleArgs:
def __init__(__self__, *,
accepted_protocols: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'FrontDoorProtocol']]]]] = None,
enabled_state: Optional[pulumi.Input[Union[str, 'RoutingRuleEnabledState']]] = None,
frontend_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
patterns_to_match: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
route_configuration: Optional[pulumi.Input[Union['ForwardingConfigurationArgs', 'RedirectConfigurationArgs']]] = None):
"""
        A routing rule represents a specification for how traffic should be treated and where it should be sent, along with health probe information.
:param pulumi.Input[Sequence[pulumi.Input[Union[str, 'FrontDoorProtocol']]]] accepted_protocols: Protocol schemes to match for this rule
:param pulumi.Input[Union[str, 'RoutingRuleEnabledState']] enabled_state: Whether to enable use of this rule. Permitted values are 'Enabled' or 'Disabled'
:param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] frontend_endpoints: Frontend endpoints associated with this rule
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: Resource name.
:param pulumi.Input[Sequence[pulumi.Input[str]]] patterns_to_match: The route patterns of the rule.
:param pulumi.Input[Union['ForwardingConfigurationArgs', 'RedirectConfigurationArgs']] route_configuration: A reference to the routing configuration.
"""
if accepted_protocols is not None:
pulumi.set(__self__, "accepted_protocols", accepted_protocols)
if enabled_state is not None:
pulumi.set(__self__, "enabled_state", enabled_state)
if frontend_endpoints is not None:
pulumi.set(__self__, "frontend_endpoints", frontend_endpoints)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if patterns_to_match is not None:
pulumi.set(__self__, "patterns_to_match", patterns_to_match)
if route_configuration is not None:
pulumi.set(__self__, "route_configuration", route_configuration)
@property
@pulumi.getter(name="acceptedProtocols")
def accepted_protocols(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'FrontDoorProtocol']]]]]:
"""
Protocol schemes to match for this rule
"""
return pulumi.get(self, "accepted_protocols")
@accepted_protocols.setter
def accepted_protocols(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'FrontDoorProtocol']]]]]):
pulumi.set(self, "accepted_protocols", value)
@property
@pulumi.getter(name="enabledState")
def enabled_state(self) -> Optional[pulumi.Input[Union[str, 'RoutingRuleEnabledState']]]:
"""
Whether to enable use of this rule. Permitted values are 'Enabled' or 'Disabled'
"""
return pulumi.get(self, "enabled_state")
@enabled_state.setter
def enabled_state(self, value: Optional[pulumi.Input[Union[str, 'RoutingRuleEnabledState']]]):
pulumi.set(self, "enabled_state", value)
@property
@pulumi.getter(name="frontendEndpoints")
def frontend_endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
"""
Frontend endpoints associated with this rule
"""
return pulumi.get(self, "frontend_endpoints")
@frontend_endpoints.setter
def frontend_endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
pulumi.set(self, "frontend_endpoints", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="patternsToMatch")
def patterns_to_match(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The route patterns of the rule.
"""
return pulumi.get(self, "patterns_to_match")
@patterns_to_match.setter
def patterns_to_match(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "patterns_to_match", value)
@property
@pulumi.getter(name="routeConfiguration")
def route_configuration(self) -> Optional[pulumi.Input[Union['ForwardingConfigurationArgs', 'RedirectConfigurationArgs']]]:
"""
A reference to the routing configuration.
"""
return pulumi.get(self, "route_configuration")
@route_configuration.setter
def route_configuration(self, value: Optional[pulumi.Input[Union['ForwardingConfigurationArgs', 'RedirectConfigurationArgs']]]):
pulumi.set(self, "route_configuration", value)
@pulumi.input_type
class SubResourceArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None):
"""
Reference to another subresource.
:param pulumi.Input[str] id: Resource ID.
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
# === src/tengi/telegram/telegram_api_utils.py (luckybots/tengi, Apache-2.0) ===
from typing import Iterable
from telebot import types as bot_types
from telethon.tl import types as api_types
def api_to_bot_markup(api_markup: api_types.ReplyInlineMarkup) -> bot_types.InlineKeyboardMarkup:
bot_markup = bot_types.InlineKeyboardMarkup()
if api_markup is not None:
for api_r in api_markup.rows:
bot_r = []
for api_b in api_r.buttons:
if isinstance(api_b, api_types.KeyboardButtonCallback):
bot_b = bot_types.InlineKeyboardButton(text=api_b.text,
callback_data=api_b.data.decode(encoding='utf-8'))
elif isinstance(api_b, api_types.KeyboardButtonUrl):
bot_b = bot_types.InlineKeyboardButton(text=api_b.text,
url=api_b.url)
else:
raise TypeError(f'Unhandled button type: {type(api_b)}')
bot_r.append(bot_b)
bot_markup.add(*bot_r)
return bot_markup
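# Illustrative bridge (assumed flow, not part of the original module): mirror a
# Telethon message's inline keyboard through a pyTelegramBotAPI bot. The bot,
# message and chat id are supplied by the caller; the names are hypothetical.
def _example_mirror_markup(bot, api_message: api_types.Message, chat_id: int) -> None:
    bot.send_message(chat_id=chat_id,
                     text='mirrored keyboard',
                     reply_markup=api_to_bot_markup(api_message.reply_markup))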
def iterate_buttons(message: api_types.Message) -> Iterable[api_types.KeyboardButton]:
if message.reply_markup is not None:
for row in message.reply_markup.rows:
for b in row.buttons:
yield b
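# Illustrative helper showing the generator above in use: return the first URL
# button on a message, or None if it has no keyboard or no URL buttons.
def _first_url_button(message: api_types.Message):
    for button in iterate_buttons(message):
        if isinstance(button, api_types.KeyboardButtonUrl):
            return button
    return None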
# === src/game/visualisation/visualise_step.py (IvanRoblesMunoz/hungry_geese_game, MIT) ===
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 28 18:03:25 2021
@author: roblesi
This module makes the visuals for the game.
"""
# pylint: disable=E0401
# =============================================================================
# Imports
# =============================================================================
import os
from pathlib import Path
from typing import Tuple
import pygame
from pygame import display
# =============================================================================
# Statics
# =============================================================================
from src.game.visualisation.visualisation_statics import (
START_HEIGHT,
START_WIDTH,
WIDTH_STEP,
HEIGHT_STEP,
)
REPO_PATH = Path(os.getcwd())
ASSETS_PATH = REPO_PATH / "src/game/assets"
BACKGROUND = pygame.image.load(ASSETS_PATH / "background_sprite.png")
LOADING_SCREEN = pygame.image.load(ASSETS_PATH / "loading_screen.png")
FOOD = pygame.image.load(ASSETS_PATH / "food_sprite.png")
OBJECT_WIDTH = FOOD.get_width()
COLOR_GEESE = {
0: (255, 255, 255), # White
1: (255, 0, 0), # Red
2: (0, 255, 0), # Green
3: (0, 0, 255), # Blue
}
# =============================================================================
# Functions
# =============================================================================
def cell_to_coordinates(cell: int) -> Tuple[float, float]:
"""Convert cell number to x,y coordinates."""
cells_width = (cell) % 11
cells_height = (cell) // 11
x_coord = START_WIDTH + cells_width * WIDTH_STEP
y_coord = START_HEIGHT + cells_height * HEIGHT_STEP
return x_coord, y_coord
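# Worked example (illustrative): for cell 13 on the 11-column board,
# 13 % 11 == 2 and 13 // 11 == 1, so the point sits two WIDTH_STEPs to the
# right of START_WIDTH and one HEIGHT_STEP below START_HEIGHT.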
def draw_goose(dis: display, goose_pos: list, goose_idx: int) -> None:
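    """Draw one goose; goose_idx picks its colour from COLOR_GEESE."""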
for body_idx, goose_part in enumerate(goose_pos):
x_coord, y_coord = cell_to_coordinates(goose_part)
# Draw head
if body_idx == 0:
pygame.draw.polygon(
surface=dis,
color=COLOR_GEESE[goose_idx],
points=[
(x_coord + HEIGHT_STEP / 3, y_coord),
(x_coord, y_coord + HEIGHT_STEP / 3),
(x_coord + WIDTH_STEP / 3, y_coord + HEIGHT_STEP * 2 / 3),
(x_coord + WIDTH_STEP * 2 / 3, y_coord + HEIGHT_STEP / 3),
],
)
# Draw tail
elif body_idx == len(goose_pos) - 1:
pygame.draw.circle(
surface=dis,
color=COLOR_GEESE[goose_idx],
center=(x_coord + WIDTH_STEP / 3, y_coord + HEIGHT_STEP / 3),
radius=WIDTH_STEP / 3,
)
        # Draw body
else:
pygame.draw.rect(
dis,
COLOR_GEESE[goose_idx],
(x_coord, y_coord, OBJECT_WIDTH, OBJECT_WIDTH),
)
def draw_step(dis: display, obs: dict) -> None:
"""
Draws step of game.
Parameters
----------
dis : display
Game display.
obs : dict
Observation representing the current state of the game.
Returns
-------
None
"""
    # Draw background
    dis.blit(BACKGROUND, (0, 0))
# Draw food
for food_cell in obs["food"]:
dis.blit(FOOD, cell_to_coordinates(food_cell))
# Draw geese
for goose_idx in range(4):
goose_pos = obs["geese"][goose_idx]
draw_goose(dis, goose_pos, goose_idx)
# Update
pygame.display.flip()
pygame.display.update()
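# Illustrative driver (not part of the module): render one hand-written
# observation. The dict shape mirrors the "geese"/"food" keys used above; the
# cell numbers and window size are assumptions for demonstration only.
def _example_render_once() -> None:
    pygame.init()
    dis = pygame.display.set_mode((770, 770))
    obs = {"geese": [[0, 1, 2], [15], [30], [45]], "food": [60, 71]}
    draw_step(dis, obs)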
def draw_loading_screen(dis: display) -> None:
"""Draw loading screen."""
msg1 = "Welcome to Hungry Geese!!!"
msg2 = "Please press any key to start."
msg3 = "You will play the white character, you can control it"
msg4 = "using the direction keys. After each input, the NPCs"
msg5 = "will take a second to submit their direction."
# Draw background
dis.blit(LOADING_SCREEN, (0, 0))
# Write message
font_style1 = pygame.font.SysFont("bahnschrift", 40)
dis.blit(font_style1.render(msg1, True, (0, 196, 151)), [40, 40])
font_style2 = pygame.font.SysFont("bahnschrift", 35)
dis.blit(font_style2.render(msg2, True, (0, 196, 151)), [40, 80])
font_style3 = pygame.font.SysFont("bahnschrift", 25)
dis.blit(font_style3.render(msg3, True, (0, 196, 151)), [30, 175])
dis.blit(font_style3.render(msg4, True, (0, 196, 151)), [30, 200])
dis.blit(font_style3.render(msg5, True, (0, 196, 151)), [30, 225])
# Update
pygame.display.flip()
pygame.display.update()
def draw_endgame_screen(dis: display, position: int) -> None:
"""Draw end game screen."""
msg1 = f"Game ended, you placed {position} of 4!!!"
msg2 = "Please press any key to start."
msg3 = "You will play the white character, you can control it"
msg4 = "using the direction keys. After each input, the NPCs"
msg5 = "will take a second to submit their direction."
# Draw background
dis.blit(LOADING_SCREEN, (0, 0))
# Write message
font_style1 = pygame.font.SysFont("bahnschrift", 40)
dis.blit(font_style1.render(msg1, True, (0, 196, 151)), [40, 50])
font_style2 = pygame.font.SysFont("bahnschrift", 35)
dis.blit(font_style2.render(msg2, True, (0, 196, 151)), [40, 90])
font_style3 = pygame.font.SysFont("bahnschrift", 25)
dis.blit(font_style3.render(msg3, True, (0, 196, 151)), [30, 175])
dis.blit(font_style3.render(msg4, True, (0, 196, 151)), [30, 200])
dis.blit(font_style3.render(msg5, True, (0, 196, 151)), [30, 225])
# Update
pygame.display.flip()
pygame.display.update()
# === electrumsv/util/__init__.py (electrumsv/electrumsv, MIT) ===
# ElectrumSV - lightweight Bitcoin SV client
# Copyright (C) 2019-2020 The ElectrumSV Developers
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import defaultdict
from decimal import Decimal
from datetime import datetime, timedelta, tzinfo
import json
import hmac
import os
import stat
import sys
import threading
import time
import types
from typing import Any, Callable, cast, Dict, Iterable, List, Optional, Sequence, Set, Tuple, \
TypedDict, TypeVar, Union
from bitcoinx import PublicKey
from ..logs import logs
from ..startup import package_dir
from ..types import ExceptionInfoType
from ..version import PACKAGE_DATE
T1 = TypeVar("T1")
def protocol_tuple(s: str) -> Tuple[int, ...]:
'''Converts a protocol version number, such as "1.0" to a tuple (1, 0).
If the version number is bad, (0, ) indicating version 0 is returned.'''
try:
return tuple(int(part) for part in s.split('.'))
except (TypeError, ValueError, AttributeError):
raise ValueError(f'invalid protocol version: {s}') from None
def version_string(ptuple: Tuple[int, ...]) -> str:
'''Convert a version tuple such as (1, 2) to "1.2".
There is always at least one dot, so (1, ) becomes "1.0".'''
while len(ptuple) < 2:
ptuple += (0, )
return '.'.join(str(p) for p in ptuple)
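def _example_protocol_round_trip() -> None:
    # Illustrative check of the two helpers above: parsing and formatting are
    # inverses, and a short tuple is padded back out to "major.minor".
    assert protocol_tuple("1.4") == (1, 4)
    assert version_string((1,)) == "1.0"
    assert version_string(protocol_tuple("1.4.2")) == "1.4.2"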
class MyEncoder(json.JSONEncoder):
# https://github.com/PyCQA/pylint/issues/414
def default(self, o: Any) -> Any: # pylint: disable=method-hidden
from ..transaction import Transaction, TransactionContext
if isinstance(o, Transaction):
return o.to_dict(TransactionContext())
return super(MyEncoder, self).default(o)
class JSON:
classes: Dict[str, Any] = {}
@classmethod
def register(cls, *classes: Any) -> None:
for klass in classes:
cls.classes[klass.__name__] = klass
@classmethod
def dumps(cls, obj: Any, **kwargs: Any) -> str:
def encode_obj(obj: Any) -> Dict[str, Any]:
class_name = obj.__class__.__name__
if class_name not in cls.classes:
raise TypeError(f'object of type {class_name} is not JSON serializable')
return {'_sv': (class_name, obj.to_json())}
kwargs['default'] = encode_obj
return json.dumps(obj, **kwargs)
@classmethod
def loads(cls, s: Union[str, bytes], **kwargs: Any) -> Any:
def decode_obj(obj: Dict[str, Any]) -> Any:
if '_sv' in obj:
class_name, ser = obj['_sv']
obj = cls.classes[class_name].from_json(ser)
return obj
kwargs['object_hook'] = decode_obj
return json.loads(s, **kwargs)
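# Illustrative sketch of the registry contract (the `_Point` class is
# hypothetical): a registered class needs a `to_json` method and a `from_json`
# classmethod, after which JSON.dumps/JSON.loads round-trip instances through
# the {'_sv': (class_name, payload)} wrapper defined above.
class _Point:
    def __init__(self, x: int, y: int) -> None:
        self.x, self.y = x, y
    def to_json(self) -> List[int]:
        return [self.x, self.y]
    @classmethod
    def from_json(cls, data: List[int]) -> '_Point':
        return cls(*data)
# JSON.register(_Point); JSON.loads(JSON.dumps(_Point(1, 2))) rebuilds a _Point.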
class DaemonThread(threading.Thread):
""" daemon thread that terminates cleanly """
def __init__(self, name: str) -> None:
threading.Thread.__init__(self)
self.name = name
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.logger = logs.get_logger(f'{name} thread')
def start(self) -> None:
with self.running_lock:
self.running = True
threading.Thread.start(self)
def is_running(self) -> bool:
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self) -> None:
with self.running_lock:
self.running = False
def on_stop(self) -> None:
self.logger.debug("stopped")
def json_encode(obj: Any) -> str:
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x: Union[str, bytes]) -> Any:
try:
return json.loads(x, parse_float=Decimal)
except Exception:
return x
# taken from Django Source Code
def constant_time_compare(val1: str, val2: str) -> bool:
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(val1.encode('utf8'), val2.encode('utf8'))
# decorator that prints execution time
def profiler(func: Callable[..., T1]) -> Callable[..., T1]:
def do_profile(func: Callable[..., T1], args: Tuple[Any, ...], kw_args: Dict[str, Any]) -> T1:
n = func.__name__
logger = logs.get_logger("profiler")
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
logger.debug("%s %.4f", n, t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
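# Usage sketch: the decorator logs the wrapped function's name and wall-clock
# duration at debug level on every call. `_slow_sum` is illustrative.
@profiler
def _slow_sum(n: int) -> int:
    return sum(range(n))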
def assert_datadir_available(config_path: str) -> None:
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'ElectrumSV datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args: Any) -> None:
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except AssertionError:
logs.root.error('assert bytes failed %s', [type(arg) for arg in args])
raise
def make_dir(path: str) -> None:
# Make directory if it does not yet exist.
if not os.path.exists(path):
if os.path.islink(path):
raise Exception('Dangling link: ' + path)
os.mkdir(path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
def format_satoshis_plain(x: int, decimal_point: int=8) -> str:
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x: Optional[int], num_zeros: int=0, decimal_point: int=8,
precision: Optional[int]=None,
is_diff: bool=False, whitespaces: bool=False) -> str:
from locale import localeconv
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
decimal_format = ",.0" + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
fmt_string = "{:" + decimal_format + "f}"
result = (fmt_string).format(x / pow (10, decimal_point)).rstrip('0')
integer_part, fract_part = result.split(".")
dp = cast(str, localeconv()['decimal_point'])
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
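# Worked examples (illustrative; assumes the locale decimal point is "."):
#   format_satoshis(123456789)          -> "1.23456789"
#   format_satoshis(1000, is_diff=True) -> "+0.00001"
#   format_satoshis(None)               -> "unknown"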
def format_fee_satoshis(fee: int, num_zeros: int=0) -> str:
return format_satoshis(fee, num_zeros, 0, precision=num_zeros)
def get_posix_timestamp() -> int:
    # In theory we could just return `int(time.time())`, but try reading the
    # documentation for `time.time` and being confident it always means the posix timestamp.
return int(datetime.now().timestamp())
def posix_timestamp_to_datetime(timestamp: int) -> datetime:
"Get a local timezone unaware datetime object for the given posix timestamp."
return datetime.fromtimestamp(timestamp)
def format_posix_timestamp(timestamp: int, default_text: str) -> str:
"Get the date and time for the given posix timestamp."
date = posix_timestamp_to_datetime(timestamp)
if date:
return date.isoformat(' ')[:16]
return default_text
# Takes a timestamp and returns a string with the approximation of the age
def age(from_timestamp: Optional[float], since_date: Optional[datetime]=None,
target_tz: Optional[tzinfo]=None, include_seconds: bool=False) -> str:
if from_timestamp is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_timestamp)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time: timedelta, include_seconds: bool) -> str:
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
def setup_thread_excepthook() -> None:
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self: threading.Thread, *args: Any, **kwargs: Any) -> None:
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook() -> None:
try:
run_original()
except Exception:
sys.excepthook(*sys.exc_info())
# NOTE(typing) mypy tells us we cannot assign to a method, but we really can and do..
self.run = run_with_except_hook # type: ignore
# NOTE(typing) mypy tells us we cannot assign to a method, but we really can and do..
threading.Thread.__init__ = init # type: ignore
def get_wallet_name_from_path(wallet_path: str) -> str:
return os.path.splitext(os.path.basename(wallet_path))[0]
def versiontuple(v: str) -> Tuple[int, ...]:
return tuple(int(x) for x in v.split("."))
def resource_path(*parts: Sequence[str]) -> str:
return os.path.join(package_dir, "data", *parts) # type: ignore
def read_resource_file(filename: str) -> str:
path = resource_path(filename)
with open(path, 'r') as f:
return f.read()
def text_resource_path(*parts: Sequence[str]) -> str:
return resource_path("text", *parts)
def read_resource_text(*parts: Sequence[str]) -> str:
# NOTE(typing) Does not recognize the sequence of strings as strings, waste of time.
return read_resource_file(os.path.join("text", *parts)) # type:ignore
def get_update_check_dates(new_date: str) -> Tuple[datetime, datetime]:
from dateutil.parser import isoparse
# This is the latest stable release date.
release_date = isoparse(new_date).astimezone()
# This is the rough date of the current release (might be stable or unstable).
current_date = isoparse(PACKAGE_DATE).astimezone()
return release_date, current_date
class ReleaseEntryType(TypedDict):
version: str
date: str
signatures: List[str]
class ReleaseDocumentType(TypedDict, total=False):
stable: ReleaseEntryType
unstable: ReleaseEntryType
UpdateCheckResultType = Union[ExceptionInfoType, ReleaseDocumentType]
def get_identified_release_signers(entry: ReleaseEntryType) -> Set[str]:
signature_addresses = [
("rt121212121", "1Bu6ABvLAXn1ARFo1gjq6sogpajGbp6iK6"),
("kyuupichan", "1BH8E3TkuJMCcH5WGD11kVweKZuhh6vb7V"),
]
release_version = entry['version']
release_date = entry['date']
release_signatures = entry.get('signatures', [])
message = release_version + release_date
signed_names = set()
for signature in release_signatures:
for signer_name, signer_address in signature_addresses:
if signer_name not in signed_names:
# They are mainnet addresses
if PublicKey.verify_message_and_address(signature, message, signer_address):
signed_names.add(signer_name)
break
return signed_names
def chunks(items: List[T1], size: int) -> Iterable[List[T1]]:
'''Break up items, an iterable, into chunks of length size.'''
for i in range(0, len(items), size):
yield items[i: i + size]
class TriggeredCallbacks:
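    # Thread-safe event-name -> callback registry. trigger_callback snapshots the
    # callback list under the lock, then invokes the callbacks without holding it.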
def __init__(self) -> None:
self._callbacks: Dict[str, List[Callable[..., None]]] = defaultdict(list)
self._callback_lock = threading.Lock()
self._callback_logger = logs.get_logger("callback-logger")
def register_callback(self, callback: Callable[..., None], events: List[str]) -> None:
with self._callback_lock:
for event in events:
if callback in self._callbacks[event]:
self._callback_logger.error("Callback reregistered %s %s", event, callback)
continue
self._callbacks[event].append(callback)
def unregister_callback(self, callback: Callable[..., None]) -> None:
with self._callback_lock:
for callbacks in self._callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def unregister_callbacks_for_object(self, owner: object) -> None:
with self._callback_lock:
for callbacks in self._callbacks.values():
for callback in callbacks[:]:
if isinstance(callback, types.MethodType):
if callback.__self__ is owner:
callbacks.remove(callback)
def trigger_callback(self, event: str, *args: Any) -> None:
with self._callback_lock:
callbacks = self._callbacks[event][:]
        for callback in callbacks:
            callback(event, *args)
class ValueLocks:
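    # Reference-counted per-value locks: acquire_lock(v) lazily creates an RLock
    # keyed by v; release_lock(v) discards it when the last holder releases.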
def __init__(self) -> None:
self._namespace_lock = threading.Lock()
self._namespace: Dict[Any, threading.RLock] = {}
self._counters: Dict[Any, int] = {}
def acquire_lock(self, value: Any) -> None:
with self._namespace_lock:
if value in self._namespace:
self._counters[value] += 1
else:
self._namespace[value] = threading.RLock()
self._counters[value] = 1
self._namespace[value].acquire()
def release_lock(self, value: Any) -> None:
with self._namespace_lock:
if self._counters[value] == 1:
del self._counters[value]
lock = self._namespace.pop(value)
else:
self._counters[value] -= 1
lock = self._namespace[value]
lock.release() | 34.665263 | 99 | 0.649034 |
from collections import defaultdict
from decimal import Decimal
from datetime import datetime, timedelta, tzinfo
import json
import hmac
import os
import stat
import sys
import threading
import time
import types
from typing import Any, Callable, cast, Dict, Iterable, List, Optional, Sequence, Set, Tuple, \
TypedDict, TypeVar, Union
from bitcoinx import PublicKey
from ..logs import logs
from ..startup import package_dir
from ..types import ExceptionInfoType
from ..version import PACKAGE_DATE
T1 = TypeVar("T1")
def protocol_tuple(s: str) -> Tuple[int, ...]:
try:
return tuple(int(part) for part in s.split('.'))
except (TypeError, ValueError, AttributeError):
raise ValueError(f'invalid protocol version: {s}') from None
def version_string(ptuple: Tuple[int, ...]) -> str:
while len(ptuple) < 2:
ptuple += (0, )
return '.'.join(str(p) for p in ptuple)
class MyEncoder(json.JSONEncoder):
    def default(self, o: Any) -> Any:
        from ..transaction import Transaction, TransactionContext
if isinstance(o, Transaction):
return o.to_dict(TransactionContext())
return super(MyEncoder, self).default(o)
class JSON:
classes: Dict[str, Any] = {}
@classmethod
def register(cls, *classes: Any) -> None:
for klass in classes:
cls.classes[klass.__name__] = klass
@classmethod
def dumps(cls, obj: Any, **kwargs: Any) -> str:
def encode_obj(obj: Any) -> Dict[str, Any]:
class_name = obj.__class__.__name__
if class_name not in cls.classes:
raise TypeError(f'object of type {class_name} is not JSON serializable')
return {'_sv': (class_name, obj.to_json())}
kwargs['default'] = encode_obj
return json.dumps(obj, **kwargs)
@classmethod
def loads(cls, s: Union[str, bytes], **kwargs: Any) -> Any:
def decode_obj(obj: Dict[str, Any]) -> Any:
if '_sv' in obj:
class_name, ser = obj['_sv']
obj = cls.classes[class_name].from_json(ser)
return obj
kwargs['object_hook'] = decode_obj
return json.loads(s, **kwargs)
class DaemonThread(threading.Thread):
def __init__(self, name: str) -> None:
threading.Thread.__init__(self)
self.name = name
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.logger = logs.get_logger(f'{name} thread')
def start(self) -> None:
with self.running_lock:
self.running = True
threading.Thread.start(self)
def is_running(self) -> bool:
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self) -> None:
with self.running_lock:
self.running = False
def on_stop(self) -> None:
self.logger.debug("stopped")
def json_encode(obj: Any) -> str:
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x: Union[str, bytes]) -> Any:
try:
return json.loads(x, parse_float=Decimal)
except Exception:
return x
def constant_time_compare(val1: str, val2: str) -> bool:
return hmac.compare_digest(val1.encode('utf8'), val2.encode('utf8'))
def profiler(func: Callable[..., T1]) -> Callable[..., T1]:
def do_profile(func: Callable[..., T1], args: Tuple[Any, ...], kw_args: Dict[str, Any]) -> T1:
n = func.__name__
logger = logs.get_logger("profiler")
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
logger.debug("%s %.4f", n, t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
def assert_datadir_available(config_path: str) -> None:
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'ElectrumSV datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args: Any) -> None:
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except AssertionError:
logs.root.error('assert bytes failed %s', [type(arg) for arg in args])
raise
def make_dir(path: str) -> None:
if not os.path.exists(path):
if os.path.islink(path):
raise Exception('Dangling link: ' + path)
os.mkdir(path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
def format_satoshis_plain(x: int, decimal_point: int=8) -> str:
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x: Optional[int], num_zeros: int=0, decimal_point: int=8,
precision: Optional[int]=None,
is_diff: bool=False, whitespaces: bool=False) -> str:
from locale import localeconv
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
decimal_format = ",.0" + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
fmt_string = "{:" + decimal_format + "f}"
result = (fmt_string).format(x / pow (10, decimal_point)).rstrip('0')
integer_part, fract_part = result.split(".")
dp = cast(str, localeconv()['decimal_point'])
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
def format_fee_satoshis(fee: int, num_zeros: int=0) -> str:
return format_satoshis(fee, num_zeros, 0, precision=num_zeros)
def get_posix_timestamp() -> int:
return int(datetime.now().timestamp())
def posix_timestamp_to_datetime(timestamp: int) -> datetime:
return datetime.fromtimestamp(timestamp)
def format_posix_timestamp(timestamp: int, default_text: str) -> str:
date = posix_timestamp_to_datetime(timestamp)
if date:
return date.isoformat(' ')[:16]
return default_text
def age(from_timestamp: Optional[float], since_date: Optional[datetime]=None,
target_tz: Optional[tzinfo]=None, include_seconds: bool=False) -> str:
if from_timestamp is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_timestamp)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time: timedelta, include_seconds: bool) -> str:
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
    elif distance_in_minutes < 43200:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
def setup_thread_excepthook() -> None:
init_original = threading.Thread.__init__
def init(self: threading.Thread, *args: Any, **kwargs: Any) -> None:
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook() -> None:
try:
run_original()
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
def get_wallet_name_from_path(wallet_path: str) -> str:
return os.path.splitext(os.path.basename(wallet_path))[0]
def versiontuple(v: str) -> Tuple[int, ...]:
return tuple(int(x) for x in v.split("."))
def resource_path(*parts: Sequence[str]) -> str:
return os.path.join(package_dir, "data", *parts)
def read_resource_file(filename: str) -> str:
path = resource_path(filename)
with open(path, 'r') as f:
return f.read()
def text_resource_path(*parts: Sequence[str]) -> str:
return resource_path("text", *parts)
def read_resource_text(*parts: Sequence[str]) -> str:
return read_resource_file(os.path.join("text", *parts))
def get_update_check_dates(new_date: str) -> Tuple[datetime, datetime]:
from dateutil.parser import isoparse
release_date = isoparse(new_date).astimezone()
current_date = isoparse(PACKAGE_DATE).astimezone()
return release_date, current_date
class ReleaseEntryType(TypedDict):
version: str
date: str
signatures: List[str]
class ReleaseDocumentType(TypedDict, total=False):
stable: ReleaseEntryType
unstable: ReleaseEntryType
UpdateCheckResultType = Union[ExceptionInfoType, ReleaseDocumentType]
def get_identified_release_signers(entry: ReleaseEntryType) -> Set[str]:
signature_addresses = [
("rt121212121", "1Bu6ABvLAXn1ARFo1gjq6sogpajGbp6iK6"),
("kyuupichan", "1BH8E3TkuJMCcH5WGD11kVweKZuhh6vb7V"),
]
release_version = entry['version']
release_date = entry['date']
release_signatures = entry.get('signatures', [])
message = release_version + release_date
signed_names = set()
for signature in release_signatures:
for signer_name, signer_address in signature_addresses:
if signer_name not in signed_names:
if PublicKey.verify_message_and_address(signature, message, signer_address):
signed_names.add(signer_name)
break
return signed_names
def chunks(items: List[T1], size: int) -> Iterable[List[T1]]:
for i in range(0, len(items), size):
yield items[i: i + size]
class TriggeredCallbacks:
def __init__(self) -> None:
self._callbacks: Dict[str, List[Callable[..., None]]] = defaultdict(list)
self._callback_lock = threading.Lock()
self._callback_logger = logs.get_logger("callback-logger")
def register_callback(self, callback: Callable[..., None], events: List[str]) -> None:
with self._callback_lock:
for event in events:
if callback in self._callbacks[event]:
self._callback_logger.error("Callback reregistered %s %s", event, callback)
continue
self._callbacks[event].append(callback)
def unregister_callback(self, callback: Callable[..., None]) -> None:
with self._callback_lock:
for callbacks in self._callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def unregister_callbacks_for_object(self, owner: object) -> None:
with self._callback_lock:
for callbacks in self._callbacks.values():
for callback in callbacks[:]:
if isinstance(callback, types.MethodType):
if callback.__self__ is owner:
callbacks.remove(callback)
def trigger_callback(self, event: str, *args: Any) -> None:
with self._callback_lock:
callbacks = self._callbacks[event][:]
        for callback in callbacks:
            callback(event, *args)
class ValueLocks:
def __init__(self) -> None:
self._namespace_lock = threading.Lock()
self._namespace: Dict[Any, threading.RLock] = {}
self._counters: Dict[Any, int] = {}
def acquire_lock(self, value: Any) -> None:
with self._namespace_lock:
if value in self._namespace:
self._counters[value] += 1
else:
self._namespace[value] = threading.RLock()
self._counters[value] = 1
self._namespace[value].acquire()
def release_lock(self, value: Any) -> None:
with self._namespace_lock:
if self._counters[value] == 1:
del self._counters[value]
lock = self._namespace.pop(value)
else:
self._counters[value] -= 1
lock = self._namespace[value]
lock.release() | true | true |
1c4a5a1c7c070e81fc5736a95a9baebb21c9b24b | 896 | py | Python | unused/py3_tf2_wide_deep/python_v2/lib/utils/image_preprocessing.py | WenqiJiang/FPGA-Accelerator-for-Recommender-Systems | 6c3031487cd1447b7f5362483c14b108177387bb | [
"MIT"
] | 4 | 2020-03-03T12:51:05.000Z | 2021-06-19T17:34:45.000Z | unused/tf2_wide_deep/python/lib/utils/image_preprocessing.py | WenqiJiang/FPGA-Accelerator-for-Recommender-Systems | 6c3031487cd1447b7f5362483c14b108177387bb | [
"MIT"
] | null | null | null | unused/tf2_wide_deep/python/lib/utils/image_preprocessing.py | WenqiJiang/FPGA-Accelerator-for-Recommender-Systems | 6c3031487cd1447b7f5362483c14b108177387bb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: lapis-hong
# @Date : 2018/3/5
"""Provides custom function to preprocess images.
TODO: custom preprocess for CTR task
"""
import tensorflow as tf
def preprocess_image(image, is_training, height, width, depth):
"""Preprocess a single image of layout [height, width, depth]."""
if is_training:
# Resize the image to add four extra pixels on each side.
image = tf.image.resize_with_crop_or_pad(
image, height + 8, width + 8)
# Randomly crop a [_HEIGHT, _WIDTH] section of the image.
image = tf.image.random_crop(image, [height, width, depth])
# Randomly flip the image horizontally.
image = tf.image.random_flip_left_right(image)
# Subtract off the mean and divide by the variance of the pixels.
image = tf.image.per_image_standardization(image)
return image
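# Usage sketch (hypothetical CIFAR-style input, 32x32x3 assumed):
#   image = preprocess_image(image, is_training=True, height=32, width=32, depth=3)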
| 35.84 | 69 | 0.679688 |
import tensorflow as tf
def preprocess_image(image, is_training, height, width, depth):
if is_training:
image = tf.image.resize_with_crop_or_pad(
image, height + 8, width + 8)
image = tf.image.random_crop(image, [height, width, depth])
image = tf.image.random_flip_left_right(image)
image = tf.image.per_image_standardization(image)
return image
| true | true |
1c4a5a3252aa102f145f995b3c7e86bbde65c4e8 | 455 | py | Python | WebSite/PrayerWall/bookings/tests.py | Tinka8ell/Prayer-Wall | e9e6f3b94a88fc68f26a660b7abc5a781bad8f71 | [
"Apache-2.0"
] | null | null | null | WebSite/PrayerWall/bookings/tests.py | Tinka8ell/Prayer-Wall | e9e6f3b94a88fc68f26a660b7abc5a781bad8f71 | [
"Apache-2.0"
] | null | null | null | WebSite/PrayerWall/bookings/tests.py | Tinka8ell/Prayer-Wall | e9e6f3b94a88fc68f26a660b7abc5a781bad8f71 | [
"Apache-2.0"
] | null | null | null | from django.test import TestCase
# for management command testing
from io import StringIO
from django.core.management import call_command
# Create your tests for bookings here.
class StartEventTest(TestCase):
def test_command_output(self):
out = StringIO()
call_command("startevent 'My New Event' 2020/11/27-20 ('online',) ('104', 2)", stdout=out)
self.assertIn('Successfully created event "My New Event"', out.getvalue())
| 30.333333 | 98 | 0.723077 | from django.test import TestCase
from io import StringIO
from django.core.management import call_command
class StartEventTest(TestCase):
def test_command_output(self):
out = StringIO()
call_command("startevent 'My New Event' 2020/11/27-20 ('online',) ('104', 2)", stdout=out)
self.assertIn('Successfully created event "My New Event"', out.getvalue())
| true | true |
1c4a5a4d9077dba1b07057603efd5ddf4d638abb | 10,165 | py | Python | isi_sdk_8_2_2/isi_sdk_8_2_2/models/event_channel_extended_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_2_2/isi_sdk_8_2_2/models/event_channel_extended_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_2_2/isi_sdk_8_2_2/models/event_channel_extended_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_2.models.event_channel_parameters import EventChannelParameters # noqa: F401,E501
class EventChannelExtendedExtended(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'allowed_nodes': 'list[int]',
'enabled': 'bool',
'excluded_nodes': 'list[int]',
'id': 'int',
'name': 'str',
'parameters': 'EventChannelParameters',
'system': 'bool',
'type': 'str'
}
attribute_map = {
'allowed_nodes': 'allowed_nodes',
'enabled': 'enabled',
'excluded_nodes': 'excluded_nodes',
'id': 'id',
'name': 'name',
'parameters': 'parameters',
'system': 'system',
'type': 'type'
}
def __init__(self, allowed_nodes=None, enabled=None, excluded_nodes=None, id=None, name=None, parameters=None, system=None, type=None): # noqa: E501
"""EventChannelExtendedExtended - a model defined in Swagger""" # noqa: E501
self._allowed_nodes = None
self._enabled = None
self._excluded_nodes = None
self._id = None
self._name = None
self._parameters = None
self._system = None
self._type = None
self.discriminator = None
if allowed_nodes is not None:
self.allowed_nodes = allowed_nodes
if enabled is not None:
self.enabled = enabled
if excluded_nodes is not None:
self.excluded_nodes = excluded_nodes
if id is not None:
self.id = id
if name is not None:
self.name = name
if parameters is not None:
self.parameters = parameters
if system is not None:
self.system = system
if type is not None:
self.type = type
@property
def allowed_nodes(self):
"""Gets the allowed_nodes of this EventChannelExtendedExtended. # noqa: E501
Nodes (LNNs) that can be masters for this channel. # noqa: E501
:return: The allowed_nodes of this EventChannelExtendedExtended. # noqa: E501
:rtype: list[int]
"""
return self._allowed_nodes
@allowed_nodes.setter
def allowed_nodes(self, allowed_nodes):
"""Sets the allowed_nodes of this EventChannelExtendedExtended.
Nodes (LNNs) that can be masters for this channel. # noqa: E501
:param allowed_nodes: The allowed_nodes of this EventChannelExtendedExtended. # noqa: E501
:type: list[int]
"""
self._allowed_nodes = allowed_nodes
@property
def enabled(self):
"""Gets the enabled of this EventChannelExtendedExtended. # noqa: E501
Channel is to be used or not. # noqa: E501
:return: The enabled of this EventChannelExtendedExtended. # noqa: E501
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this EventChannelExtendedExtended.
Channel is to be used or not. # noqa: E501
:param enabled: The enabled of this EventChannelExtendedExtended. # noqa: E501
:type: bool
"""
self._enabled = enabled
@property
def excluded_nodes(self):
"""Gets the excluded_nodes of this EventChannelExtendedExtended. # noqa: E501
Nodes (LNNs) that can NOT be the masters for this channel. # noqa: E501
:return: The excluded_nodes of this EventChannelExtendedExtended. # noqa: E501
:rtype: list[int]
"""
return self._excluded_nodes
@excluded_nodes.setter
def excluded_nodes(self, excluded_nodes):
"""Sets the excluded_nodes of this EventChannelExtendedExtended.
Nodes (LNNs) that can NOT be the masters for this channel. # noqa: E501
:param excluded_nodes: The excluded_nodes of this EventChannelExtendedExtended. # noqa: E501
:type: list[int]
"""
self._excluded_nodes = excluded_nodes
@property
def id(self):
"""Gets the id of this EventChannelExtendedExtended. # noqa: E501
Unique identifier. # noqa: E501
:return: The id of this EventChannelExtendedExtended. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this EventChannelExtendedExtended.
Unique identifier. # noqa: E501
:param id: The id of this EventChannelExtendedExtended. # noqa: E501
:type: int
"""
if id is not None and id > 4294967295: # noqa: E501
raise ValueError("Invalid value for `id`, must be a value less than or equal to `4294967295`") # noqa: E501
if id is not None and id < 0: # noqa: E501
raise ValueError("Invalid value for `id`, must be a value greater than or equal to `0`") # noqa: E501
self._id = id
@property
def name(self):
"""Gets the name of this EventChannelExtendedExtended. # noqa: E501
Channel name, may not contain / # noqa: E501
:return: The name of this EventChannelExtendedExtended. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this EventChannelExtendedExtended.
Channel name, may not contain / # noqa: E501
:param name: The name of this EventChannelExtendedExtended. # noqa: E501
:type: str
"""
if name is not None and len(name) > 254:
raise ValueError("Invalid value for `name`, length must be less than or equal to `254`") # noqa: E501
if name is not None and len(name) < 1:
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501
self._name = name
@property
def parameters(self):
"""Gets the parameters of this EventChannelExtendedExtended. # noqa: E501
Parameters to be used for an smtp channel. # noqa: E501
:return: The parameters of this EventChannelExtendedExtended. # noqa: E501
:rtype: EventChannelParameters
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this EventChannelExtendedExtended.
Parameters to be used for an smtp channel. # noqa: E501
:param parameters: The parameters of this EventChannelExtendedExtended. # noqa: E501
:type: EventChannelParameters
"""
self._parameters = parameters
@property
def system(self):
"""Gets the system of this EventChannelExtendedExtended. # noqa: E501
Channel is a pre-defined system channel. # noqa: E501
:return: The system of this EventChannelExtendedExtended. # noqa: E501
:rtype: bool
"""
return self._system
@system.setter
def system(self, system):
"""Sets the system of this EventChannelExtendedExtended.
Channel is a pre-defined system channel. # noqa: E501
:param system: The system of this EventChannelExtendedExtended. # noqa: E501
:type: bool
"""
self._system = system
@property
def type(self):
"""Gets the type of this EventChannelExtendedExtended. # noqa: E501
The mechanism used by the channel. # noqa: E501
:return: The type of this EventChannelExtendedExtended. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this EventChannelExtendedExtended.
The mechanism used by the channel. # noqa: E501
:param type: The type of this EventChannelExtendedExtended. # noqa: E501
:type: str
"""
allowed_values = ["connectemc", "smtp", "snmp", "heartbeat"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EventChannelExtendedExtended):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.085627 | 153 | 0.600787 |
import pprint
import re
import six
from isi_sdk_8_2_2.models.event_channel_parameters import EventChannelParameters
class EventChannelExtendedExtended(object):
swagger_types = {
'allowed_nodes': 'list[int]',
'enabled': 'bool',
'excluded_nodes': 'list[int]',
'id': 'int',
'name': 'str',
'parameters': 'EventChannelParameters',
'system': 'bool',
'type': 'str'
}
attribute_map = {
'allowed_nodes': 'allowed_nodes',
'enabled': 'enabled',
'excluded_nodes': 'excluded_nodes',
'id': 'id',
'name': 'name',
'parameters': 'parameters',
'system': 'system',
'type': 'type'
}
def __init__(self, allowed_nodes=None, enabled=None, excluded_nodes=None, id=None, name=None, parameters=None, system=None, type=None):
self._allowed_nodes = None
self._enabled = None
self._excluded_nodes = None
self._id = None
self._name = None
self._parameters = None
self._system = None
self._type = None
self.discriminator = None
if allowed_nodes is not None:
self.allowed_nodes = allowed_nodes
if enabled is not None:
self.enabled = enabled
if excluded_nodes is not None:
self.excluded_nodes = excluded_nodes
if id is not None:
self.id = id
if name is not None:
self.name = name
if parameters is not None:
self.parameters = parameters
if system is not None:
self.system = system
if type is not None:
self.type = type
@property
def allowed_nodes(self):
return self._allowed_nodes
@allowed_nodes.setter
def allowed_nodes(self, allowed_nodes):
self._allowed_nodes = allowed_nodes
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, enabled):
self._enabled = enabled
@property
def excluded_nodes(self):
return self._excluded_nodes
@excluded_nodes.setter
def excluded_nodes(self, excluded_nodes):
self._excluded_nodes = excluded_nodes
@property
def id(self):
return self._id
@id.setter
def id(self, id):
        if id is not None and id > 4294967295:
            raise ValueError("Invalid value for `id`, must be a value less than or equal to `4294967295`")
        if id is not None and id < 0:
            raise ValueError("Invalid value for `id`, must be a value greater than or equal to `0`")
self._id = id
@property
def name(self):
return self._name
@name.setter
def name(self, name):
if name is not None and len(name) > 254:
raise ValueError("Invalid value for `name`, length must be less than or equal to `254`") if name is not None and len(name) < 1:
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`")
self._name = name
@property
def parameters(self):
return self._parameters
@parameters.setter
def parameters(self, parameters):
self._parameters = parameters
@property
def system(self):
return self._system
@system.setter
def system(self, system):
self._system = system
@property
def type(self):
return self._type
@type.setter
def type(self, type):
allowed_values = ["connectemc", "smtp", "snmp", "heartbeat"] if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" .format(type, allowed_values)
)
self._type = type
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, EventChannelExtendedExtended):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c4a5ab94d83ecf9ea0835ba18256570b96630c6 | 4,204 | py | Python | challenges/musicbox/musicbox.py | nickbjohnson4224/greyhat-crypto-ctf-2014 | 62d03ecf1126edc04944dc9183fa2ba01ed974e7 | [
"MIT"
] | 4 | 2015-04-18T02:25:20.000Z | 2018-12-17T20:46:07.000Z | challenges/musicbox/musicbox.py | nickbjohnson4224/greyhat-crypto-ctf-2014 | 62d03ecf1126edc04944dc9183fa2ba01ed974e7 | [
"MIT"
] | null | null | null | challenges/musicbox/musicbox.py | nickbjohnson4224/greyhat-crypto-ctf-2014 | 62d03ecf1126edc04944dc9183fa2ba01ed974e7 | [
"MIT"
] | null | null | null | import sys, struct, array
import SocketServer
# import StringIO as StringIO
# import pygame
p = 0x08d682598db70a889ff1bc7e3e00d602e9fe9e812162d4e3d06954b2ff554a4a21d5f0aab3eae5c49ac1aec7117709cba1b88b79ae9805d28ddb99be07ba05ea219654afe0c8dddac7e73165f3dcd851a3c8a3b6515766321420aff177eaaa7b3da39682d7e773aa863a729706d52e83a1d0e34d69b461c837ed239745d6c50f124e34f4d1d00ad15d6ebabda8c189c7b8b35b5bae7a9cbafc5f09bd506a39bd9d2d9245324f02ff7254fab4ab17f7a165d49e318baeb8effc4e1a3f1251d2ea1ab93f767bd6dcf5567406550ea1f194ef7deb1b2fec8b30520b6777fea1b305593db941f9ad8ce1eba6f77c3a104bd97448ec0c11688c5bf82e85c90234abfc5
q = 0x0f67e886d1a0d1e59a53b4aa831c9bcb39a5d0a8f
g = 0x27d6a1359821e2a758a93f5c06ebb26382a06a4681e7cf44d71aeff2390c87d20ce7cd885fb01fd84ad9d52839a8ae163bfee5d09820fea1a09f814801cb157b2c5bc4636d042fb2ac1a836f33adafd6735826ae1e96c3bfbd04f7df672a14120f6780e8848ff3b3123004654127c9d25843cd54c68c396a410a2f0496e8ebb35b971993dee0f596388911277fce46ff3c5191e7e76262875bb3368724d3a40c852ccc80be4dc82335fb9267c6ff0e20396ae8bb2d51e35f15fbd07fa1b354944c285367ac88763dd00fe6fe0aab5a49faf7bc10f8e90ba376efdc034e9e1cae7e79ac906aed3b513c5f3452dc33eb307ab3d45efe92a31b1cd9a6f52dd5fb09
y = 0x6bff47f5ea736b03c85885b0bd0f1f7fa2a7efef8812c544ab47f4aa3542235f5a298fc778bb9263223c66d149f88d377b1e70a5715e4554776127ffb874e218d7c75a3c6202cc3e2cfb6a5a4cf34e7e8d5428b90b7aa1dbf9a7e965feab029220266ad0dabade6ae09362f6463eea60e3133bb79fc4af511057e31574f4b0f34b848b180fa20da7d9a6d8adedded9819da20b8923073e35f43ca75eeb9a1ab5451c3a5446306f93ef246759f59e65e498032d48aece56f437b4b7179daf3dfa80d6a36c211ed5acdfeaf91a7e8070a49a521f3c2e411a26eeaf8fab697535914982f1f7cda1e1aa1aac602f9606ea326632b4fbabf6b361fe118637e048c482
def bytesToInt(s):
x = 0
for c in s:
x = (x << 8) | ord(c)
return x
def verifySig(r, s, m):
#DSA, straight from Wikipedia
if not 0 < s < q and 0 < r < q:
return False
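    # pow(s, q-2, q) gives the modular inverse of s, since q is prime (Fermat)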
w = pow(s, q-2, q)
u1 = m*w % q
u2 = r*w % q
v = pow(g, u1, p) * pow(y, u2, p) % p
return (v % q) == r
def superHash(b):
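    # Custom 64-bit hash: a Marsaglia xorshift64 PRNG (seed 88172645463325252)
    # mixed with position-dependent multipliers over the 16-bit words of b.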
b += '0' * (-len(b) % 2)
h = (len(b) + 1) * (len(b) ^ 42)
x = 88172645463325252
for i, c in enumerate(array.array('H', b)):
x ^= (x<<13) & 0xFFFFFFFFFFFFFFFF
x ^= (x>>7) & 0xFFFFFFFFFFFFFFFF
x ^= (x<<17) & 0xFFFFFFFFFFFFFFFF
h += c * (((i % 7) + 9) ** (i % 25))
if i % 2:
h *= x | i
else:
h += x | i
h &= 0xFFFFFFFFFFFFFFFF
h ^= (len(b) ^ 1) * (len(b) + 42)
h &= 0xFFFFFFFFFFFFFFFF
return h
class HandleCheckin(SocketServer.BaseRequestHandler):
def readStr(self):
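        # Wire format: 2-byte magic b'\x12\xae', little-endian uint32 length,
        # then exactly `length` payload bytes.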
req = self.request
prefix = req.recv(2)
if prefix != '\x12\xae':
req.sendall("Incorrect prefix\n")
req.close()
return None
leng = struct.unpack("<I", req.recv(4))[0]
toRead = ""
while len(toRead) < leng:
toRead += req.recv(leng - len(toRead))
if len(toRead) > leng:
req.sendall("Length does not match input data size\n")
req.close()
return None
return toRead
def handle(self):
req = self.request
req.sendall("""Welcome to the new and improved Music Box! Please provide your signed music file.""")
data = self.readStr()
if data is None or len(data) < 48:
req.sendall("Incomplete header\n")
return
elif len(data) > 12345678:
req.sendall("The data. It is too much!\n")
return
r = bytesToInt(data[:20])
s = bytesToInt(data[20:40])
h = bytesToInt(data[40:48])
sound = data[48:]
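        # Signed blob layout: r (20 bytes) | s (20 bytes) | 64-bit superHash (8 bytes) | sound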
if not verifySig(r, s, h):
req.sendall("Invalid signature\n")
return
elif h != superHash(sound):
req.sendall("Message hash does not match\n")
return
else:
req.sendall("Success!\n")
if "Secret backdoor lol GIMME THE FLAG" in sound:
with open('flag.txt','r') as f:
req.sendall(f.read() + "\n")
else:
req.sendall("Unfortunately, the musicbox is not available at the moment.\n")
req.close()
# f = StringIO.StringIO(sound)
# pygame.mixer.music.load(f)
# pygame.mixer.music.play(loops=-1)
class ThreadedServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
if __name__ == "__main__":
# pygame.mixer.init()
HOST, PORT = sys.argv[1], int(sys.argv[2])
print 'Running on port', PORT
server = ThreadedServer((HOST, PORT), HandleCheckin)
server.allow_reuse_address = True
server.serve_forever()
| 36.556522 | 519 | 0.766175 | import sys, struct, array
import SocketServer
p = 0x08d682598db70a889ff1bc7e3e00d602e9fe9e812162d4e3d06954b2ff554a4a21d5f0aab3eae5c49ac1aec7117709cba1b88b79ae9805d28ddb99be07ba05ea219654afe0c8dddac7e73165f3dcd851a3c8a3b6515766321420aff177eaaa7b3da39682d7e773aa863a729706d52e83a1d0e34d69b461c837ed239745d6c50f124e34f4d1d00ad15d6ebabda8c189c7b8b35b5bae7a9cbafc5f09bd506a39bd9d2d9245324f02ff7254fab4ab17f7a165d49e318baeb8effc4e1a3f1251d2ea1ab93f767bd6dcf5567406550ea1f194ef7deb1b2fec8b30520b6777fea1b305593db941f9ad8ce1eba6f77c3a104bd97448ec0c11688c5bf82e85c90234abfc5
q = 0x0f67e886d1a0d1e59a53b4aa831c9bcb39a5d0a8f
g = 0x27d6a1359821e2a758a93f5c06ebb26382a06a4681e7cf44d71aeff2390c87d20ce7cd885fb01fd84ad9d52839a8ae163bfee5d09820fea1a09f814801cb157b2c5bc4636d042fb2ac1a836f33adafd6735826ae1e96c3bfbd04f7df672a14120f6780e8848ff3b3123004654127c9d25843cd54c68c396a410a2f0496e8ebb35b971993dee0f596388911277fce46ff3c5191e7e76262875bb3368724d3a40c852ccc80be4dc82335fb9267c6ff0e20396ae8bb2d51e35f15fbd07fa1b354944c285367ac88763dd00fe6fe0aab5a49faf7bc10f8e90ba376efdc034e9e1cae7e79ac906aed3b513c5f3452dc33eb307ab3d45efe92a31b1cd9a6f52dd5fb09
y = 0x6bff47f5ea736b03c85885b0bd0f1f7fa2a7efef8812c544ab47f4aa3542235f5a298fc778bb9263223c66d149f88d377b1e70a5715e4554776127ffb874e218d7c75a3c6202cc3e2cfb6a5a4cf34e7e8d5428b90b7aa1dbf9a7e965feab029220266ad0dabade6ae09362f6463eea60e3133bb79fc4af511057e31574f4b0f34b848b180fa20da7d9a6d8adedded9819da20b8923073e35f43ca75eeb9a1ab5451c3a5446306f93ef246759f59e65e498032d48aece56f437b4b7179daf3dfa80d6a36c211ed5acdfeaf91a7e8070a49a521f3c2e411a26eeaf8fab697535914982f1f7cda1e1aa1aac602f9606ea326632b4fbabf6b361fe118637e048c482
def bytesToInt(s):
x = 0
for c in s:
x = (x << 8) | ord(c)
return x
def verifySig(r, s, m):
if not 0 < s < q and 0 < r < q:
return False
w = pow(s, q-2, q)
u1 = m*w % q
u2 = r*w % q
v = pow(g, u1, p) * pow(y, u2, p) % p
return (v % q) == r
def superHash(b):
b += '0' * (-len(b) % 2)
h = (len(b) + 1) * (len(b) ^ 42)
x = 88172645463325252
for i, c in enumerate(array.array('H', b)):
x ^= (x<<13) & 0xFFFFFFFFFFFFFFFF
x ^= (x>>7) & 0xFFFFFFFFFFFFFFFF
x ^= (x<<17) & 0xFFFFFFFFFFFFFFFF
h += c * (((i % 7) + 9) ** (i % 25))
if i % 2:
h *= x | i
else:
h += x | i
h &= 0xFFFFFFFFFFFFFFFF
h ^= (len(b) ^ 1) * (len(b) + 42)
h &= 0xFFFFFFFFFFFFFFFF
return h
class HandleCheckin(SocketServer.BaseRequestHandler):
def readStr(self):
req = self.request
prefix = req.recv(2)
if prefix != '\x12\xae':
req.sendall("Incorrect prefix\n")
req.close()
return None
leng = struct.unpack("<I", req.recv(4))[0]
toRead = ""
while len(toRead) < leng:
toRead += req.recv(leng - len(toRead))
if len(toRead) > leng:
req.sendall("Length does not match input data size\n")
req.close()
return None
return toRead
def handle(self):
req = self.request
req.sendall("""Welcome to the new and improved Music Box! Please provide your signed music file.""")
data = self.readStr()
if data is None or len(data) < 48:
req.sendall("Incomplete header\n")
return
elif len(data) > 12345678:
req.sendall("The data. It is too much!\n")
return
r = bytesToInt(data[:20])
s = bytesToInt(data[20:40])
h = bytesToInt(data[40:48])
sound = data[48:]
if not verifySig(r, s, h):
req.sendall("Invalid signature\n")
return
elif h != superHash(sound):
req.sendall("Message hash does not match\n")
return
else:
req.sendall("Success!\n")
if "Secret backdoor lol GIMME THE FLAG" in sound:
with open('flag.txt','r') as f:
req.sendall(f.read() + "\n")
else:
req.sendall("Unfortunately, the musicbox is not available at the moment.\n")
req.close()
class ThreadedServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
if __name__ == "__main__":
HOST, PORT = sys.argv[1], int(sys.argv[2])
print 'Running on port', PORT
server = ThreadedServer((HOST, PORT), HandleCheckin)
server.allow_reuse_address = True
server.serve_forever()
| false | true |
1c4a5cf471c60ef2a56210e6cc5aeab54a57bcce | 4,028 | py | Python | configs/top_down/resnetv1d/coco/resnetv1d152_coco_256x192.py | jcwon0/BlurHPE | c97a57e92a8a7f171b0403aee640222a32513562 | [
"Apache-2.0"
] | null | null | null | configs/top_down/resnetv1d/coco/resnetv1d152_coco_256x192.py | jcwon0/BlurHPE | c97a57e92a8a7f171b0403aee640222a32513562 | [
"Apache-2.0"
] | null | null | null | configs/top_down/resnetv1d/coco/resnetv1d152_coco_256x192.py | jcwon0/BlurHPE | c97a57e92a8a7f171b0403aee640222a32513562 | [
"Apache-2.0"
] | null | null | null | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
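# step policy: lr is multiplied by gamma (mmcv default 0.1 assumed) at epochs 170 and 200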
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
# model settings
model = dict(
type='TopDown',
pretrained='mmcls://resnet152_v1d',
backbone=dict(type='ResNetV1d', depth=152),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=2048,
out_channels=channel_cfg['num_output_channels'],
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=32,
workers_per_gpu=2,
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| 28.771429 | 80 | 0.606504 | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
model = dict(
type='TopDown',
pretrained='mmcls://resnet152_v1d',
backbone=dict(type='ResNetV1d', depth=152),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=2048,
out_channels=channel_cfg['num_output_channels'],
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=32,
workers_per_gpu=2,
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| true | true |
1c4a5d624386288eaed2cfbb319df3274578f578 | 28,100 | py | Python | paddlenlp/datasets/dataset.py | JunnYu/ConvBERT-Prod | a1351e1e7f9400cb8c71d0a15d23629b4cb055d4 | [
"Apache-2.0"
] | 11 | 2022-01-06T07:39:47.000Z | 2022-03-22T06:18:40.000Z | paddlenlp/datasets/dataset.py | JunnYu/ConvBERT-Prod | a1351e1e7f9400cb8c71d0a15d23629b4cb055d4 | [
"Apache-2.0"
] | null | null | null | paddlenlp/datasets/dataset.py | JunnYu/ConvBERT-Prod | a1351e1e7f9400cb8c71d0a15d23629b4cb055d4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import collections
import io
import math
import os
import warnings
import sys
import inspect
from multiprocess import Pool, RLock
import time
import paddle.distributed as dist
from paddle.io import Dataset, IterableDataset
from paddle.dataset.common import md5file
from paddle.utils.download import get_path_from_url, _get_unique_endpoints
from paddlenlp.utils.env import DATA_HOME
from typing import Iterable, Iterator, Optional, List, Any, Callable, Union
import importlib
from functools import partial
__all__ = ['MapDataset', 'DatasetBuilder', 'IterDataset', 'load_dataset']
DATASETS_MODULE_PATH = "paddlenlp.datasets."
def import_main_class(module_path):
"""
Import a module at module_path and return its DatasetBuilder class.
"""
module_path = DATASETS_MODULE_PATH + module_path
module = importlib.import_module(module_path)
main_cls_type = DatasetBuilder
# Find the main class in our imported module
module_main_cls = None
for name, obj in module.__dict__.items():
if isinstance(obj, type) and issubclass(obj, main_cls_type):
if name == 'DatasetBuilder':
continue
module_main_cls = obj
break
return module_main_cls
def load_dataset(path_or_read_func,
name=None,
data_files=None,
splits=None,
lazy=None,
**kwargs):
"""
    This method will load a dataset, either from the PaddleNLP library or from a
self-defined data loading script, by calling functions in `DatasetBuilder`.
For all the names of datasets in PaddleNLP library, see here: `dataset_list
<https://paddlenlp.readthedocs.io/zh/latest/data_prepare/dataset_list.html>`__.
Either `splits` or `data_files` must be specified.
Args:
path_or_read_func (str|callable): Name of the dataset processing script
in PaddleNLP library or a custom data reading function.
name (str, optional): Additional name to select a more specific dataset.
Defaults to None.
data_files (str|list|tuple|dict, optional): Defining the path of dataset
            files. If None, `splits` must be specified. Defaults to None.
        splits (str|list|tuple, optional): Which split of the data to load. If None,
`data_files` must be specified. Defaults to None.
        lazy (bool, optional): Whether to return `MapDataset` or an `IterDataset`.
True for `IterDataset`. False for `MapDataset`. If None, return the
default type of this dataset. Defaults to None.
kwargs (dict): Other keyword arguments to be passed to the `DatasetBuilder`.
Returns:
A `MapDataset` or `IterDataset` or a tuple of those.
For how to use this function, please see `dataset_load
<https://paddlenlp.readthedocs.io/zh/latest/data_prepare/dataset_load.html>`__
and `dataset_self_defined
<https://paddlenlp.readthedocs.io/zh/latest/data_prepare/dataset_self_defined.html>`__
"""
if inspect.isfunction(path_or_read_func):
assert lazy is not None, "lazy can not be None in custom mode."
kwargs['name'] = name
kwargs['data_files'] = data_files
kwargs['splits'] = splits
custom_kwargs = {}
for name in inspect.signature(path_or_read_func).parameters.keys():
if name in kwargs.keys():
custom_kwargs[name] = kwargs[name]
reader_instance = SimpleBuilder(lazy=lazy, read_func=path_or_read_func)
return reader_instance.read(**custom_kwargs)
else:
reader_cls = import_main_class(path_or_read_func)
reader_instance = reader_cls(lazy=lazy, name=name, **kwargs)
# Check if selected name and split is valid in this DatasetBuilder
if hasattr(reader_instance, 'BUILDER_CONFIGS'):
if name in reader_cls.BUILDER_CONFIGS.keys():
split_names = reader_cls.BUILDER_CONFIGS[name]['splits'].keys()
else:
raise ValueError(
'Invalid name "{}". Should be one of {}.'.format(
name, list(reader_cls.BUILDER_CONFIGS.keys())))
elif hasattr(reader_instance, 'SPLITS'):
split_names = reader_instance.SPLITS.keys()
else:
raise AttributeError(
"Either 'SPLITS' or 'BUILDER_CONFIGS' must be implemented for DatasetBuilder."
)
selected_splits = []
if isinstance(splits, list) or isinstance(splits, tuple):
selected_splits.extend(splits)
else:
selected_splits += [splits]
for split_name in selected_splits:
            if split_name not in split_names and split_name is not None:
raise ValueError('Invalid split "{}". Should be one of {}.'.
format(split_name, list(split_names)))
datasets = reader_instance.read_datasets(
data_files=data_files, splits=splits)
return datasets
class MapDataset(Dataset):
"""
Wraps a map-style dataset-like object as an instance of `MapDataset`, and equips it
with `map` and other utility methods. All non-magic methods of the raw object
are also accessible.
Args:
data (list|Dataset): An object with `__getitem__` and `__len__` methods. It could
be a list or a subclass of `paddle.io.Dataset`.
kwargs (dict, optional): Other information to be passed to the dataset.
For examples of this class, please see `dataset_self_defined
<https://paddlenlp.readthedocs.io/zh/latest/data_prepare/dataset_self_defined.html>`__.
"""
def __init__(self, data, **kwargs):
self.data = data
self._transform_pipline = []
self.new_data = self.data
self.label_list = kwargs.pop('label_list', None)
self.vocab_info = kwargs.pop('vocab_info', None)
def _transform(self, data):
for fn in self._transform_pipline:
data = fn(data)
return data
def __getitem__(self, idx):
"""
Basic function of `MapDataset` to get sample from dataset with a given
index.
"""
return self._transform(self.new_data[
idx]) if self._transform_pipline else self.new_data[idx]
def __len__(self):
"""
Returns the number of samples in dataset.
"""
return len(self.new_data)
def filter(self, fn, num_workers=0):
"""
Filters samples by the filter function and uses the filtered data to
update this dataset.
Args:
fn (callable): A filter function that takes a sample as input and
returns a boolean. Samples that return False would be discarded.
num_workers(int, optional): Number of processes for multiprocessing. If
set to 0, it doesn't use multiprocessing. Defaults to `0`.
"""
assert num_workers >= 0, "num_workers should be a non-negative value"
if num_workers > 0:
pool = Pool(
num_workers, initargs=(RLock(), ), maxtasksperchild=1000)
def filter_shard(num_workers, index, fn):
self.shard(
num_shards=num_workers, index=index, contiguous=True)
self._filter(fn=fn)
return self
kwds_per_shard = [
dict(
num_workers=num_workers, index=rank, fn=fn)
for rank in range(num_workers)
]
results = [
pool.apply_async(
filter_shard, kwds=kwds) for kwds in kwds_per_shard
]
transformed_shards = [r.get() for r in results]
pool.close()
pool.join()
self.new_data = []
for i in range(num_workers):
self.new_data += transformed_shards[i].new_data
return self
else:
return self._filter(fn)
def _filter(self, fn):
self.new_data = [
self.new_data[idx] for idx in range(len(self.new_data))
if fn(self.new_data[idx])
]
return self
def shard(self, num_shards=None, index=None, contiguous=False):
"""
Split the dataset into `num_shards` pieces. Note that the size of each
shard might be different because the original dataset may not be evenly
divisible.
Args:
num_shards (int, optional): An integer representing the number of
data shards. If None, `num_shards` would be number of trainers.
Defaults to `None`.
index (int, optional): An integer representing the index of the
current shard. If None, `index` would be the current trainer rank
id. Defaults to `None`.
            contiguous (bool, optional): If True, contiguous chunks of data
will be select for sharding. And total number of examples will
be the same. Otherwise each shard will contain all examples of
dataset whose index mod `num_shards` = `index`. Defaults to `False`.
"""
if num_shards is None:
num_shards = dist.get_world_size()
if index is None:
index = dist.get_rank()
if contiguous:
div = len(self) // num_shards
mod = len(self) % num_shards
start = div * index + min(index, mod)
end = start + div + (1 if index < mod else 0)
self.new_data = self.new_data[start:end]
else:
num_samples = int(math.ceil(len(self.new_data) * 1.0 / num_shards))
self.new_data = [
self.new_data[idx] for idx in range(len(self.new_data))
if idx % num_shards == index
]
return self
def map(self, fn, lazy=True, batched=False, num_workers=0):
"""
Performs specific function on the dataset to transform and update every sample.
Args:
fn (callable): Transformations to be performed. It receives single
sample as argument if batched is False. Else it receives all examples.
lazy (bool, optional): If True, transformations would be delayed and
performed on demand. Otherwise, transforms all samples at once. Note that
if `fn` is stochastic, `lazy` should be True or you will get the same
                result on all epochs. Defaults to True.
batched(bool, optional): If True, transformations would take all examples as
input and return a collection of transformed examples. Note that if set
True, `lazy` option would be ignored. Defaults to False.
num_workers(int, optional): Number of processes for multiprocessing. If
set to 0, it doesn't use multiprocessing. Note that if set to positive
value, `lazy` option would be ignored. Defaults to 0.
"""
assert num_workers >= 0, "num_workers should be a non-negative value"
if num_workers > 0:
def map_shard(num_workers, index, fn, batched):
self.shard(
num_shards=num_workers, index=index, contiguous=True)
self._map(fn=fn, lazy=False, batched=batched)
return self
kwds_per_shard = [
dict(
num_workers=num_workers,
index=rank,
fn=fn,
batched=batched) for rank in range(num_workers)
]
pool = Pool(
num_workers, initargs=(RLock(), ), maxtasksperchild=1000)
results = [
pool.apply_async(
map_shard, kwds=kwds) for kwds in kwds_per_shard
]
transformed_shards = [r.get() for r in results]
pool.close()
pool.join()
self.new_data = []
for i in range(num_workers):
self.new_data += transformed_shards[i].new_data
return self
else:
return self._map(fn, lazy=lazy, batched=batched)
def _map(self, fn, lazy=True, batched=False):
if batched:
self.new_data = fn(self.new_data)
elif lazy:
self._transform_pipline.append(fn)
else:
self.new_data = [
fn(self.new_data[idx]) for idx in range(len(self.new_data))
]
return self
class IterDataset(IterableDataset):
"""
Wraps a dataset-like object as an instance of `IterDataset`, and equips it with
`map` and other utility methods. All non-magic methods of the raw object
    are also accessible.
Args:
data (Iterable): An object with `__iter__` function. It can be a Iterable or a
subclass of `paddle.io.IterableDataset`.
kwargs (dict, optional): Other information to be passed to the dataset.
For examples of this class, please see `dataset_self_defined
<https://paddlenlp.readthedocs.io/zh/latest/data_prepare/dataset_self_defined.html>`__.
"""
def __init__(self, data, **kwargs):
self.data = data
self._transform_pipline = []
self._filter_pipline = []
self.label_list = kwargs.pop('label_list', None)
self.vocab_info = kwargs.pop('vocab_info', None)
def _transform(self, data):
for fn in self._transform_pipline:
data = fn(data)
return data
def _shard_filter(self, num_samples):
return True
def _filter(self, data):
for fn in self._filter_pipline:
if not fn(data):
return False
return True
def __iter__(self):
"""
        Yields samples sequentially.
"""
num_samples = 0
if inspect.isfunction(self.data):
for example in self.data():
if (not self._filter_pipline or
                        self._filter(example)
) and self._shard_filter(num_samples=num_samples):
yield self._transform(
example) if self._transform_pipline else example
num_samples += 1
else:
if inspect.isgenerator(self.data):
warnings.warn(
                    'Receiving a generator as the data source; data can only be iterated once'
)
for example in self.data:
if (not self._filter_pipline or
                        self._filter(example)
) and self._shard_filter(num_samples=num_samples):
yield self._transform(
example) if self._transform_pipline else example
num_samples += 1
def filter(self, fn):
"""
Filters samples by the filter function and uses the filtered data to
update this dataset.
Args:
fn (callable): A filter function that takes a sample as input and
returns a boolean. Samples that return False are discarded.
"""
self._filter_pipline.append(fn)
return self
def shard(self, num_shards=None, index=None):
"""
Split the dataset into `num_shards` pieces.
Args:
            num_shards (int, optional): An integer representing the number of
                data shards. If None, `num_shards` would be the number of
                trainers. Defaults to None.
            index (int, optional): An integer representing the index of the
                current shard. If None, `index` would be the current trainer
                rank id. Defaults to None.
"""
if num_shards is None:
num_shards = dist.get_world_size()
if index is None:
index = dist.get_rank()
        def sharder(num_shards, index, num_samples):
            return num_samples % num_shards == index
fn = partial(sharder, num_shards=num_shards, index=index)
self._shard_filter = fn
return self
def map(self, fn):
"""
        Performs the given function on the dataset to transform and update every sample.
Args:
fn (callable): Transformations to be performed. It receives single
sample as argument.
"""
self._transform_pipline.append(fn)
return self
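# A sketch added for illustration (not in the original source) of driving an
# `IterDataset` from a generator function; here `shard` filters by sample
# index rather than pre-splitting the data.
def _iter_dataset_demo():
    def reader():
        for i in range(6):
            yield {"id": i}
    ds = IterDataset(reader)
    ds.shard(num_shards=2, index=0)  # keep samples whose index mod 2 == 0
    ds.map(lambda ex: {"id": ex["id"] * 10})
    return list(ds)  # [{'id': 0}, {'id': 20}, {'id': 40}]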
class DatasetBuilder:
"""
    A base class for all DatasetBuilders. It provides a `read()` function to turn
    a data file into a `MapDataset` or `IterDataset`.
    The `_get_data()` and `_read()` functions should be implemented to download
    the data file and read it into an `Iterable` of examples.
For how to define a custom `DatasetBuilder`, please see `contribute_dataset
<https://paddlenlp.readthedocs.io/zh/latest/community/contribute_dataset.html>`__.
"""
lazy = False
def __init__(self, lazy=None, name=None, **config):
if lazy is not None:
self.lazy = lazy
self.name = name
self.config = config
def read_datasets(self, splits=None, data_files=None):
datasets = []
assert splits or data_files, "`data_files` and `splits` can not both be None."
def remove_if_exit(filepath):
if isinstance(filepath, (list, tuple)):
for file in filepath:
try:
os.remove(file)
except OSError:
pass
else:
try:
os.remove(filepath)
except OSError:
pass
if splits and data_files is None:
assert isinstance(splits, str) or (
isinstance(splits, list) and isinstance(splits[0], str)
) or (
isinstance(splits, tuple) and isinstance(splits[0], str)
            ), "`splits` should be a string, a list of strings, or a tuple of strings."
if isinstance(splits, str):
splits = [splits]
parallel_env = dist.ParallelEnv()
unique_endpoints = _get_unique_endpoints(
parallel_env.trainer_endpoints[:])
            # Register the atexit cleanup hooks for all splits up front, together.
lock_files = []
for split in splits:
lock_file = os.path.join(DATA_HOME, self.__class__.__name__)
if self.name is not None:
lock_file = lock_file + "." + self.name
lock_file += "." + split + ".done" + "." + str(os.getppid())
lock_files.append(lock_file)
                # Must register with all procs so that the lock file can be removed
                # when any proc breaks. Otherwise, the single registered proc may
                # not receive the proper signal sent by the parent proc to exit.
atexit.register(lambda: remove_if_exit(lock_files))
for split in splits:
filename = self._get_data(split)
lock_file = os.path.join(DATA_HOME, self.__class__.__name__)
if self.name is not None:
lock_file = lock_file + "." + self.name
lock_file += "." + split + ".done" + "." + str(os.getppid())
                # `lock_file` indicates the finished status of `_get_data`.
                # `_get_data` only runs in the proc specified by `unique_endpoints`,
                # since `get_path_from_url` only works for it. The other
                # procs wait for `_get_data` to finish.
if parallel_env.current_endpoint in unique_endpoints:
f = open(lock_file, "w")
f.close()
else:
while not os.path.exists(lock_file):
time.sleep(1)
datasets.append(self.read(filename=filename, split=split))
if data_files:
assert isinstance(data_files, str) or isinstance(
data_files, tuple
) or isinstance(
data_files, list
), "`data_files` should be a string or tuple or list of strings."
if isinstance(data_files, str):
data_files = [data_files]
default_split = 'train'
if splits:
if isinstance(splits, str):
splits = [splits]
assert len(splits) == len(
data_files
                ), "Number of `splits` and number of `data_files` should be the same if you want to specify the split of a local data file."
datasets += [
self.read(
filename=data_files[i], split=splits[i])
for i in range(len(data_files))
]
else:
datasets += [
self.read(
filename=data_files[i], split=default_split)
for i in range(len(data_files))
]
return datasets if len(datasets) > 1 else datasets[0]
def read(self, filename, split='train'):
"""
Returns a dataset containing all the examples that can be read from the file path.
If `self.lazy` is False, this eagerly reads all instances from `self._read()`
and returns a `MapDataset`.
If `self.lazy` is True, this returns an `IterDataset`, which internally
relies on the generator created from `self._read()` to lazily produce examples.
In this case your implementation of `_read()` must also be lazy
(that is, not load all examples into memory at once).
Args:
filename (str): Path of data file to read, usually provided by `_get_data`
function.
            split (str, optional): The split name of the selected dataset. This only makes
                a difference when data files of different splits have different structures.
Returns:
A `MapDataset|IterDataset`.
"""
label_list = self.get_labels()
vocab_info = self.get_vocab()
if self.lazy:
def generate_examples():
generator = self._read(
filename, split
) if self._read.__code__.co_argcount > 2 else self._read(
filename)
for example in generator:
# We need to check if the example contains label column and confirm its name.
# For now we only allow `label` or `labels` to be the name of label column.
if 'labels' in example.keys():
label_col = 'labels'
elif 'label' in example.keys():
label_col = 'label'
else:
label_col = None
# Convert class label to label ids.
if label_list is not None and example.get(label_col, None):
label_dict = {}
for i, label in enumerate(label_list):
label_dict[label] = i
if isinstance(example[label_col], list) or isinstance(
example[label_col], tuple):
for label_idx in range(len(example[label_col])):
example[label_col][label_idx] = label_dict[
example[label_col][label_idx]]
else:
example[label_col] = label_dict[example[label_col]]
yield example
else:
yield example
return IterDataset(
generate_examples(),
label_list=label_list,
vocab_info=vocab_info)
else:
examples = self._read(
filename,
split) if self._read.__code__.co_argcount > 2 else self._read(
filename)
# Then some validation.
if not isinstance(examples, list):
examples = list(examples)
if not examples:
raise ValueError(
"No instances were read from the given filepath {}. "
"Is the path correct?".format(filename))
# We need to check if the example contains label column and confirm its name.
# For now we only allow `label` or `labels` to be the name of label column.
if 'labels' in examples[0].keys():
label_col = 'labels'
elif 'label' in examples[0].keys():
label_col = 'label'
else:
label_col = None
# Convert class label to label ids.
if label_list is not None and examples[0].get(label_col, None):
label_dict = {}
for i, label in enumerate(label_list):
label_dict[label] = i
for idx in range(len(examples)):
if isinstance(examples[idx][label_col],
list) or isinstance(examples[idx][label_col],
tuple):
for label_idx in range(len(examples[idx][label_col])):
examples[idx][label_col][label_idx] = label_dict[
examples[idx][label_col][label_idx]]
else:
examples[idx][label_col] = label_dict[examples[idx][
label_col]]
return MapDataset(
examples, label_list=label_list, vocab_info=vocab_info)
def _read(self, filename: str, *args):
"""
        Reads examples from the given filename and returns them as an
`Iterable` (which could be a list or a generator).
This method must be implemented in self-defined `DatasetBuilder`.
"""
raise NotImplementedError
def _get_data(self, mode: str):
"""
        Downloads the data file for the given split according to any customized
        split information and returns its local filepath.
This method must be implemented in self-defined `DatasetBuilder`.
"""
raise NotImplementedError
def get_labels(self):
"""
Returns list of class labels of the dataset if specified.
"""
return None
def get_vocab(self):
"""
Returns vocab file path of the dataset if specified.
"""
return None
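# A hedged sketch of a self-defined `DatasetBuilder` (an illustrative addition;
# the file name, TSV layout, and label names below are invented assumptions).
class _TSVBuilder(DatasetBuilder):
    SPLITS = {"train": "train.tsv"}
    def _get_data(self, mode: str):
        # Real builders download with `get_path_from_url`; this sketch assumes
        # the file already exists locally.
        return self.SPLITS[mode]
    def _read(self, filename: str, *args):
        with open(filename, encoding="utf-8") as f:
            for line in f:
                text, label = line.rstrip("\n").split("\t")
                yield {"text": text, "label": label}
    def get_labels(self):
        # Class labels get mapped to ids by `read()` above; the label strings
        # in the file are assumed to match these names.
        return ["negative", "positive"]
# Usage would look like: _TSVBuilder(lazy=False).read("train.tsv") -> MapDataset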
class SimpleBuilder(DatasetBuilder):
def __init__(self, lazy, read_func):
self._read = read_func
self.lazy = lazy
def read(self, **kwargs):
if self.lazy:
def generate_examples():
generator = self._read(**kwargs)
for example in generator:
yield example
return IterDataset(generate_examples)
else:
examples = self._read(**kwargs)
if hasattr(examples, '__len__') and hasattr(examples,
'__getitem__'):
return MapDataset(examples)
else:
return MapDataset(list(examples))
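# Usage sketch (an addition for illustration, not in the original source):
# `load_dataset` with a custom read function routes through `SimpleBuilder`.
def _custom_load_demo():
    def read():
        for i in range(3):
            yield {"text": "sample-%d" % i}
    # lazy=False materialises everything into a MapDataset.
    ds = load_dataset(read, lazy=False)
    return len(ds)  # 3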
| 38.865837 | 138 | 0.574698 |
import atexit
import collections
import io
import math
import os
import warnings
import sys
import inspect
from multiprocess import Pool, RLock
import time
import paddle.distributed as dist
from paddle.io import Dataset, IterableDataset
from paddle.dataset.common import md5file
from paddle.utils.download import get_path_from_url, _get_unique_endpoints
from paddlenlp.utils.env import DATA_HOME
from typing import Iterable, Iterator, Optional, List, Any, Callable, Union
import importlib
from functools import partial
__all__ = ['MapDataset', 'DatasetBuilder', 'IterDataset', 'load_dataset']
DATASETS_MODULE_PATH = "paddlenlp.datasets."
def import_main_class(module_path):
module_path = DATASETS_MODULE_PATH + module_path
module = importlib.import_module(module_path)
main_cls_type = DatasetBuilder
module_main_cls = None
for name, obj in module.__dict__.items():
if isinstance(obj, type) and issubclass(obj, main_cls_type):
if name == 'DatasetBuilder':
continue
module_main_cls = obj
break
return module_main_cls
def load_dataset(path_or_read_func,
name=None,
data_files=None,
splits=None,
lazy=None,
**kwargs):
if inspect.isfunction(path_or_read_func):
assert lazy is not None, "lazy can not be None in custom mode."
kwargs['name'] = name
kwargs['data_files'] = data_files
kwargs['splits'] = splits
custom_kwargs = {}
for name in inspect.signature(path_or_read_func).parameters.keys():
if name in kwargs.keys():
custom_kwargs[name] = kwargs[name]
reader_instance = SimpleBuilder(lazy=lazy, read_func=path_or_read_func)
return reader_instance.read(**custom_kwargs)
else:
reader_cls = import_main_class(path_or_read_func)
reader_instance = reader_cls(lazy=lazy, name=name, **kwargs)
if hasattr(reader_instance, 'BUILDER_CONFIGS'):
if name in reader_cls.BUILDER_CONFIGS.keys():
split_names = reader_cls.BUILDER_CONFIGS[name]['splits'].keys()
else:
raise ValueError(
'Invalid name "{}". Should be one of {}.'.format(
name, list(reader_cls.BUILDER_CONFIGS.keys())))
elif hasattr(reader_instance, 'SPLITS'):
split_names = reader_instance.SPLITS.keys()
else:
raise AttributeError(
"Either 'SPLITS' or 'BUILDER_CONFIGS' must be implemented for DatasetBuilder."
)
selected_splits = []
if isinstance(splits, list) or isinstance(splits, tuple):
selected_splits.extend(splits)
else:
selected_splits += [splits]
for split_name in selected_splits:
if split_name not in split_names and split_name != None:
raise ValueError('Invalid split "{}". Should be one of {}.'.
format(split_name, list(split_names)))
datasets = reader_instance.read_datasets(
data_files=data_files, splits=splits)
return datasets
class MapDataset(Dataset):
def __init__(self, data, **kwargs):
self.data = data
self._transform_pipline = []
self.new_data = self.data
self.label_list = kwargs.pop('label_list', None)
self.vocab_info = kwargs.pop('vocab_info', None)
def _transform(self, data):
for fn in self._transform_pipline:
data = fn(data)
return data
def __getitem__(self, idx):
return self._transform(self.new_data[
idx]) if self._transform_pipline else self.new_data[idx]
def __len__(self):
return len(self.new_data)
def filter(self, fn, num_workers=0):
assert num_workers >= 0, "num_workers should be a non-negative value"
if num_workers > 0:
pool = Pool(
num_workers, initargs=(RLock(), ), maxtasksperchild=1000)
def filter_shard(num_workers, index, fn):
self.shard(
num_shards=num_workers, index=index, contiguous=True)
self._filter(fn=fn)
return self
kwds_per_shard = [
dict(
num_workers=num_workers, index=rank, fn=fn)
for rank in range(num_workers)
]
results = [
pool.apply_async(
filter_shard, kwds=kwds) for kwds in kwds_per_shard
]
transformed_shards = [r.get() for r in results]
pool.close()
pool.join()
self.new_data = []
for i in range(num_workers):
self.new_data += transformed_shards[i].new_data
return self
else:
return self._filter(fn)
def _filter(self, fn):
self.new_data = [
self.new_data[idx] for idx in range(len(self.new_data))
if fn(self.new_data[idx])
]
return self
def shard(self, num_shards=None, index=None, contiguous=False):
if num_shards is None:
num_shards = dist.get_world_size()
if index is None:
index = dist.get_rank()
if contiguous:
div = len(self) // num_shards
mod = len(self) % num_shards
start = div * index + min(index, mod)
end = start + div + (1 if index < mod else 0)
self.new_data = self.new_data[start:end]
else:
num_samples = int(math.ceil(len(self.new_data) * 1.0 / num_shards))
self.new_data = [
self.new_data[idx] for idx in range(len(self.new_data))
if idx % num_shards == index
]
return self
def map(self, fn, lazy=True, batched=False, num_workers=0):
assert num_workers >= 0, "num_workers should be a non-negative value"
if num_workers > 0:
def map_shard(num_workers, index, fn, batched):
self.shard(
num_shards=num_workers, index=index, contiguous=True)
self._map(fn=fn, lazy=False, batched=batched)
return self
kwds_per_shard = [
dict(
num_workers=num_workers,
index=rank,
fn=fn,
batched=batched) for rank in range(num_workers)
]
pool = Pool(
num_workers, initargs=(RLock(), ), maxtasksperchild=1000)
results = [
pool.apply_async(
map_shard, kwds=kwds) for kwds in kwds_per_shard
]
transformed_shards = [r.get() for r in results]
pool.close()
pool.join()
self.new_data = []
for i in range(num_workers):
self.new_data += transformed_shards[i].new_data
return self
else:
return self._map(fn, lazy=lazy, batched=batched)
def _map(self, fn, lazy=True, batched=False):
if batched:
self.new_data = fn(self.new_data)
elif lazy:
self._transform_pipline.append(fn)
else:
self.new_data = [
fn(self.new_data[idx]) for idx in range(len(self.new_data))
]
return self
class IterDataset(IterableDataset):
def __init__(self, data, **kwargs):
self.data = data
self._transform_pipline = []
self._filter_pipline = []
self.label_list = kwargs.pop('label_list', None)
self.vocab_info = kwargs.pop('vocab_info', None)
def _transform(self, data):
for fn in self._transform_pipline:
data = fn(data)
return data
def _shard_filter(self, num_samples):
return True
def _filter(self, data):
for fn in self._filter_pipline:
if not fn(data):
return False
return True
def __iter__(self):
num_samples = 0
if inspect.isfunction(self.data):
for example in self.data():
if (not self._filter_pipline or
                        self._filter(example)
) and self._shard_filter(num_samples=num_samples):
yield self._transform(
example) if self._transform_pipline else example
num_samples += 1
else:
if inspect.isgenerator(self.data):
warnings.warn(
                    'Receiving a generator as the data source; data can only be iterated once'
)
for example in self.data:
if (not self._filter_pipline or
                        self._filter(example)
) and self._shard_filter(num_samples=num_samples):
yield self._transform(
example) if self._transform_pipline else example
num_samples += 1
def filter(self, fn):
self._filter_pipline.append(fn)
return self
def shard(self, num_shards=None, index=None):
if num_shards is None:
num_shards = dist.get_world_size()
if index is None:
index = dist.get_rank()
        def sharder(num_shards, index, num_samples):
            return num_samples % num_shards == index
fn = partial(sharder, num_shards=num_shards, index=index)
self._shard_filter = fn
return self
def map(self, fn):
self._transform_pipline.append(fn)
return self
class DatasetBuilder:
lazy = False
def __init__(self, lazy=None, name=None, **config):
if lazy is not None:
self.lazy = lazy
self.name = name
self.config = config
def read_datasets(self, splits=None, data_files=None):
datasets = []
assert splits or data_files, "`data_files` and `splits` can not both be None."
def remove_if_exit(filepath):
if isinstance(filepath, (list, tuple)):
for file in filepath:
try:
os.remove(file)
except OSError:
pass
else:
try:
os.remove(filepath)
except OSError:
pass
if splits and data_files is None:
assert isinstance(splits, str) or (
isinstance(splits, list) and isinstance(splits[0], str)
) or (
isinstance(splits, tuple) and isinstance(splits[0], str)
            ), "`splits` should be a string, a list of strings, or a tuple of strings."
if isinstance(splits, str):
splits = [splits]
parallel_env = dist.ParallelEnv()
unique_endpoints = _get_unique_endpoints(
parallel_env.trainer_endpoints[:])
lock_files = []
for split in splits:
lock_file = os.path.join(DATA_HOME, self.__class__.__name__)
if self.name is not None:
lock_file = lock_file + "." + self.name
lock_file += "." + split + ".done" + "." + str(os.getppid())
lock_files.append(lock_file)
atexit.register(lambda: remove_if_exit(lock_files))
for split in splits:
filename = self._get_data(split)
lock_file = os.path.join(DATA_HOME, self.__class__.__name__)
if self.name is not None:
lock_file = lock_file + "." + self.name
lock_file += "." + split + ".done" + "." + str(os.getppid())
if parallel_env.current_endpoint in unique_endpoints:
f = open(lock_file, "w")
f.close()
else:
while not os.path.exists(lock_file):
time.sleep(1)
datasets.append(self.read(filename=filename, split=split))
if data_files:
assert isinstance(data_files, str) or isinstance(
data_files, tuple
) or isinstance(
data_files, list
), "`data_files` should be a string or tuple or list of strings."
if isinstance(data_files, str):
data_files = [data_files]
default_split = 'train'
if splits:
if isinstance(splits, str):
splits = [splits]
assert len(splits) == len(
data_files
                ), "Number of `splits` and number of `data_files` should be the same if you want to specify the split of a local data file."
datasets += [
self.read(
filename=data_files[i], split=splits[i])
for i in range(len(data_files))
]
else:
datasets += [
self.read(
filename=data_files[i], split=default_split)
for i in range(len(data_files))
]
return datasets if len(datasets) > 1 else datasets[0]
def read(self, filename, split='train'):
label_list = self.get_labels()
vocab_info = self.get_vocab()
if self.lazy:
def generate_examples():
generator = self._read(
filename, split
) if self._read.__code__.co_argcount > 2 else self._read(
filename)
for example in generator:
if 'labels' in example.keys():
label_col = 'labels'
elif 'label' in example.keys():
label_col = 'label'
else:
label_col = None
if label_list is not None and example.get(label_col, None):
label_dict = {}
for i, label in enumerate(label_list):
label_dict[label] = i
if isinstance(example[label_col], list) or isinstance(
example[label_col], tuple):
for label_idx in range(len(example[label_col])):
example[label_col][label_idx] = label_dict[
example[label_col][label_idx]]
else:
example[label_col] = label_dict[example[label_col]]
yield example
else:
yield example
return IterDataset(
generate_examples(),
label_list=label_list,
vocab_info=vocab_info)
else:
examples = self._read(
filename,
split) if self._read.__code__.co_argcount > 2 else self._read(
filename)
if not isinstance(examples, list):
examples = list(examples)
if not examples:
raise ValueError(
"No instances were read from the given filepath {}. "
"Is the path correct?".format(filename))
if 'labels' in examples[0].keys():
label_col = 'labels'
elif 'label' in examples[0].keys():
label_col = 'label'
else:
label_col = None
if label_list is not None and examples[0].get(label_col, None):
label_dict = {}
for i, label in enumerate(label_list):
label_dict[label] = i
for idx in range(len(examples)):
if isinstance(examples[idx][label_col],
list) or isinstance(examples[idx][label_col],
tuple):
for label_idx in range(len(examples[idx][label_col])):
examples[idx][label_col][label_idx] = label_dict[
examples[idx][label_col][label_idx]]
else:
examples[idx][label_col] = label_dict[examples[idx][
label_col]]
return MapDataset(
examples, label_list=label_list, vocab_info=vocab_info)
def _read(self, filename: str, *args):
raise NotImplementedError
def _get_data(self, mode: str):
raise NotImplementedError
def get_labels(self):
return None
def get_vocab(self):
return None
class SimpleBuilder(DatasetBuilder):
def __init__(self, lazy, read_func):
self._read = read_func
self.lazy = lazy
def read(self, **kwargs):
if self.lazy:
def generate_examples():
generator = self._read(**kwargs)
for example in generator:
yield example
return IterDataset(generate_examples)
else:
examples = self._read(**kwargs)
if hasattr(examples, '__len__') and hasattr(examples,
'__getitem__'):
return MapDataset(examples)
else:
return MapDataset(list(examples))
| true | true |
1c4a5d83d36d2e8c948aeb0969a624af35d58159 | 6,949 | py | Python | simple_python_profiler/main.py | jeshan/simple-python-profiler | a3d3a709781b5aaff38b55389c93efd132274344 | [
"MIT"
] | null | null | null | simple_python_profiler/main.py | jeshan/simple-python-profiler | a3d3a709781b5aaff38b55389c93efd132274344 | [
"MIT"
] | 1 | 2021-06-02T00:57:36.000Z | 2021-06-02T00:57:36.000Z | simple_python_profiler/main.py | jeshan/simple-python-profiler | a3d3a709781b5aaff38b55389c93efd132274344 | [
"MIT"
] | null | null | null | import functools
import inspect
import sys
from time import perf_counter_ns
from typing import List, Dict
from loguru import logger
from recursive_decorator import recursive_decorator
def fn_description(f):
return f'{f.__module__}.{f.__qualname__}'
def sort_fn(invocation):
return invocation.end - invocation.start
def log_call(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
logger.debug(f'Entering {f}')
result = f(*args, **kwargs)
logger.debug(f'Exiting {f}')
return result
return wrapper
@log_call
def sort_invocations_by_individual_time(invocations):
return sorted(invocations, key=sort_fn, reverse=True)
def duration(invocation):
return invocation['end'] - invocation['start']
@log_call
def sort_invocations_by_function_time(group):
name_speed_tuple_list = []
for fn_name, invocations in group.items():
total_per_function = sum(map(lambda x: duration(x), invocations))
name_speed_tuple_list.append((fn_name, total_per_function, len(invocations)))
return sorted(name_speed_tuple_list, key=lambda x: x[1], reverse=True)
@log_call
def group_by_function(invocations: List) -> Dict[object, List]:
result = {}
for invocation in invocations:
f = invocation['f']
if f not in result:
result[f] = []
result[f].append(invocation)
return result
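# A tiny illustration (added here, not in the original module) of how the
# grouping and sorting helpers above compose on synthetic invocation records.
def _grouping_demo():
    invocations = [
        {'f': 'fn_a', 'start': 0, 'end': 5, 'result': None},
        {'f': 'fn_a', 'start': 5, 'end': 7, 'result': None},
        {'f': 'fn_b', 'start': 0, 'end': 4, 'result': None},
    ]
    grouped = group_by_function(invocations)
    return sort_invocations_by_function_time(grouped)
    # [('fn_a', 7, 2), ('fn_b', 4, 1)]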
def is_site_package(module):
    return 'site-packages' in (module.__dict__.get('__file__') or '')
def exclude_paths(module):
return module.__dict__.get('__file__')
def exclude_importers(module):
loader = module.__dict__.get('__loader__')
loader_type = type(loader)
if hasattr(loader_type, '__name__'):
name = loader_type.__name__
elif hasattr(loader, 'name'):
name = loader.name
if loader:
qualified_name = loader_type.__module__ + '.' + name
else:
qualified_name = ''
return qualified_name.endswith('._SixMetaPathImporter')
def is_system_package(module):
from importlib._bootstrap import BuiltinImporter
loader = module.__dict__.get('__loader__')
return (
loader in [BuiltinImporter]
or (
hasattr(module, '__file__')
and f"python{sys.version_info.major}.{sys.version_info.minor}/{(module.__package__ or '').replace('.', '/')}"
in module.__file__
)
or module.__name__.startswith('typing.')
)
def get_loaded_modules():
import sys
all_modules = []
for name, module in sys.modules.items():
all_modules.append((name, module))
return all_modules
def mergeFunctionMetadata(f, g):
# this function was copied from Twisted core, https://github.com/racker/python-twisted-core
# licence notice in file ../LICENCE-Twisted-core
"""
Overwrite C{g}'s name and docstring with values from C{f}. Update
C{g}'s instance dictionary with C{f}'s.
To use this function safely you must use the return value. In Python 2.3,
L{mergeFunctionMetadata} will create a new function. In later versions of
Python, C{g} will be mutated and returned.
@return: A function that has C{g}'s behavior and metadata merged from
C{f}.
"""
try:
g.__name__ = f.__name__
except TypeError:
try:
import types
merged = types.FunctionType(
g.func_code, g.func_globals, f.__name__, inspect.getargspec(g)[-1], g.func_closure
)
except TypeError:
            # Fall back to `g` unchanged if a new function object cannot be
            # created, so `merged` is always bound before the return below.
            merged = g
else:
merged = g
try:
merged.__doc__ = f.__doc__
except (TypeError, AttributeError):
pass
try:
merged.__dict__.update(g.__dict__)
merged.__dict__.update(f.__dict__)
except (TypeError, AttributeError):
pass
merged.__module__ = f.__module__
return merged
def time_fn():
return perf_counter_ns()
def singleton(cls):
obj = cls()
# Always return the same object
cls.__new__ = staticmethod(lambda cls: obj)
# Disable __init__
try:
del cls.__init__
except AttributeError:
pass
return cls
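# Tiny illustration (an addition, not part of the original source): every
# construction of a @singleton-decorated class yields the same object.
@singleton
class _Config:
    def __init__(self):
        self.value = 42
def _singleton_demo():
    assert _Config() is _Config()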
@singleton
class Profiler:
def __init__(self):
logger.debug('creating instance of profiler')
self.invocations = []
def add_invocation(self, start, end, result, f):
i = {'start': start, 'end': end, 'result': result, 'f': f}
self.invocations.append(i)
def __enter__(self):
bootstrap()
logger.debug('Start recording invocations')
def __exit__(self, exc_type, exc_val, exc_tb):
logger.debug(f'stopped recording invocations, got {len(self.invocations)} of them.')
invocation_group = group_by_function(self.invocations)
by_time = sort_invocations_by_function_time(invocation_group)
by_time = limit_results(by_time)
print_results(by_time)
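# A minimal usage sketch (illustrative addition, not part of the original
# module). Entering the context runs `bootstrap()`, which patches functions in
# eligible loaded modules; note the profiler skips its own module, so a real
# workload would be defined in user code imported beforehand.
def _profiler_demo(workload):
    with Profiler():
        workload()
    # On exit, the profiler groups invocations per function and logs the
    # slowest ones via `print_results`.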
@recursive_decorator
def profile_recursive(f):
return profile(f)
def profile(f):
if f in [time_fn, profile]:
return f
# print('in profile', f)
@functools.wraps(f)
def wrapper(*args, **kwargs):
# print('wrapped', f)
start = time_fn()
result = f(*args, **kwargs)
end = time_fn()
Profiler().add_invocation(start, end, result, f)
return result
return wrapper
def edit_functions(items, module):
for fn_name, fn in items:
if fn == edit_functions:
continue
# print('editing', fn_name, fn)
new_item = mergeFunctionMetadata(fn, profile(fn))
setattr(module, fn.__name__, new_item)
def bootstrap():
for name, module in get_loaded_modules():
# print('loading', name)
try:
items = inspect.getmembers(module, inspect.isfunction)
except Exception as e:
# I saw this could happen when in debug mode
logger.warning(f'Failed getting members for module {module}, skipping')
logger.error(e)
continue
# if 'main' not in name:
exclude_site_package = True
exclude_system_package = True
if 'simple_python_profiler' in module.__name__:
logger.trace('Excluding the profiler itself')
continue
if exclude_site_package and is_site_package(module):
logger.trace(f'excluding site package {module}')
continue
if exclude_importers(module):
logger.trace(f'excluding importer {module}')
continue
if exclude_system_package and is_system_package(module):
logger.trace(f'excluding system module {module}')
continue
logger.debug(f'allowing module {module}')
edit_functions(items, module)
def limit_results(groups):
return groups[:100]
@log_call
def print_results(by_time):
for item in by_time:
logger.info(fn_description(item[0]) + f',invoked={item[2]} times, total={item[1] / 1_000_000}ms')
| 27.466403 | 121 | 0.647287 | import functools
import inspect
import sys
from time import perf_counter_ns
from typing import List, Dict
from loguru import logger
from recursive_decorator import recursive_decorator
def fn_description(f):
return f'{f.__module__}.{f.__qualname__}'
def sort_fn(invocation):
return invocation.end - invocation.start
def log_call(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
logger.debug(f'Entering {f}')
result = f(*args, **kwargs)
logger.debug(f'Exiting {f}')
return result
return wrapper
@log_call
def sort_invocations_by_individual_time(invocations):
return sorted(invocations, key=sort_fn, reverse=True)
def duration(invocation):
return invocation['end'] - invocation['start']
@log_call
def sort_invocations_by_function_time(group):
name_speed_tuple_list = []
for fn_name, invocations in group.items():
total_per_function = sum(map(lambda x: duration(x), invocations))
name_speed_tuple_list.append((fn_name, total_per_function, len(invocations)))
return sorted(name_speed_tuple_list, key=lambda x: x[1], reverse=True)
@log_call
def group_by_function(invocations: List) -> Dict[object, List]:
result = {}
for invocation in invocations:
f = invocation['f']
if f not in result:
result[f] = []
result[f].append(invocation)
return result
def is_site_package(module):
    return 'site-packages' in (module.__dict__.get('__file__') or '')
def exclude_paths(module):
return module.__dict__.get('__file__')
def exclude_importers(module):
loader = module.__dict__.get('__loader__')
loader_type = type(loader)
if hasattr(loader_type, '__name__'):
name = loader_type.__name__
elif hasattr(loader, 'name'):
name = loader.name
if loader:
qualified_name = loader_type.__module__ + '.' + name
else:
qualified_name = ''
return qualified_name.endswith('._SixMetaPathImporter')
def is_system_package(module):
from importlib._bootstrap import BuiltinImporter
loader = module.__dict__.get('__loader__')
return (
loader in [BuiltinImporter]
or (
hasattr(module, '__file__')
and f"python{sys.version_info.major}.{sys.version_info.minor}/{(module.__package__ or '').replace('.', '/')}"
in module.__file__
)
or module.__name__.startswith('typing.')
)
def get_loaded_modules():
import sys
all_modules = []
for name, module in sys.modules.items():
all_modules.append((name, module))
return all_modules
def mergeFunctionMetadata(f, g):
try:
g.__name__ = f.__name__
except TypeError:
try:
import types
merged = types.FunctionType(
g.func_code, g.func_globals, f.__name__, inspect.getargspec(g)[-1], g.func_closure
)
except TypeError:
            merged = g
else:
merged = g
try:
merged.__doc__ = f.__doc__
except (TypeError, AttributeError):
pass
try:
merged.__dict__.update(g.__dict__)
merged.__dict__.update(f.__dict__)
except (TypeError, AttributeError):
pass
merged.__module__ = f.__module__
return merged
def time_fn():
return perf_counter_ns()
def singleton(cls):
obj = cls()
cls.__new__ = staticmethod(lambda cls: obj)
try:
del cls.__init__
except AttributeError:
pass
return cls
@singleton
class Profiler:
def __init__(self):
logger.debug('creating instance of profiler')
self.invocations = []
def add_invocation(self, start, end, result, f):
i = {'start': start, 'end': end, 'result': result, 'f': f}
self.invocations.append(i)
def __enter__(self):
bootstrap()
logger.debug('Start recording invocations')
def __exit__(self, exc_type, exc_val, exc_tb):
logger.debug(f'stopped recording invocations, got {len(self.invocations)} of them.')
invocation_group = group_by_function(self.invocations)
by_time = sort_invocations_by_function_time(invocation_group)
by_time = limit_results(by_time)
print_results(by_time)
@recursive_decorator
def profile_recursive(f):
return profile(f)
def profile(f):
if f in [time_fn, profile]:
return f
@functools.wraps(f)
def wrapper(*args, **kwargs):
start = time_fn()
result = f(*args, **kwargs)
end = time_fn()
Profiler().add_invocation(start, end, result, f)
return result
return wrapper
def edit_functions(items, module):
for fn_name, fn in items:
if fn == edit_functions:
continue
new_item = mergeFunctionMetadata(fn, profile(fn))
setattr(module, fn.__name__, new_item)
def bootstrap():
for name, module in get_loaded_modules():
try:
items = inspect.getmembers(module, inspect.isfunction)
except Exception as e:
logger.warning(f'Failed getting members for module {module}, skipping')
logger.error(e)
continue
exclude_site_package = True
exclude_system_package = True
if 'simple_python_profiler' in module.__name__:
logger.trace('Excluding the profiler itself')
continue
if exclude_site_package and is_site_package(module):
logger.trace(f'excluding site package {module}')
continue
if exclude_importers(module):
logger.trace(f'excluding importer {module}')
continue
if exclude_system_package and is_system_package(module):
logger.trace(f'excluding system module {module}')
continue
logger.debug(f'allowing module {module}')
edit_functions(items, module)
def limit_results(groups):
return groups[:100]
@log_call
def print_results(by_time):
for item in by_time:
logger.info(fn_description(item[0]) + f',invoked={item[2]} times, total={item[1] / 1_000_000}ms')
| true | true |
1c4a610aebca605ce60f6577184339a699daaaa0 | 470 | py | Python | tests/performance/conftest.py | rspadim/aiocache | bf675ae912173bee25cc1d8c22b77f57de34375d | [
"BSD-3-Clause"
] | 213 | 2020-11-02T14:29:46.000Z | 2022-03-24T23:12:32.000Z | tests/performance/conftest.py | rspadim/aiocache | bf675ae912173bee25cc1d8c22b77f57de34375d | [
"BSD-3-Clause"
] | 48 | 2020-11-02T11:17:13.000Z | 2022-03-24T17:55:31.000Z | tests/performance/conftest.py | rspadim/aiocache | bf675ae912173bee25cc1d8c22b77f57de34375d | [
"BSD-3-Clause"
] | 49 | 2020-11-13T07:41:37.000Z | 2022-03-25T12:24:49.000Z | import pytest
from aiocache import Cache
from aiocache.backends.redis import RedisBackend
@pytest.fixture
def redis_cache(event_loop):
cache = Cache(Cache.REDIS, namespace="test", pool_max_size=1)
yield cache
for _, pool in RedisBackend.pools.items():
pool.close()
event_loop.run_until_complete(pool.wait_closed())
@pytest.fixture
def memcached_cache():
cache = Cache(Cache.MEMCACHED, namespace="test", pool_size=1)
yield cache
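# Illustrative sketch (an addition, not part of the original conftest): how a
# test might consume the `redis_cache` fixture above. In real use this would
# live in a test module, be named `test_*`, and carry `@pytest.mark.asyncio`
# (assuming the pytest-asyncio plugin).
async def example_redis_roundtrip(redis_cache):
    await redis_cache.set("key", "value")
    assert await redis_cache.get("key") == "value"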
| 22.380952 | 65 | 0.731915 | import pytest
from aiocache import Cache
from aiocache.backends.redis import RedisBackend
@pytest.fixture
def redis_cache(event_loop):
cache = Cache(Cache.REDIS, namespace="test", pool_max_size=1)
yield cache
for _, pool in RedisBackend.pools.items():
pool.close()
event_loop.run_until_complete(pool.wait_closed())
@pytest.fixture
def memcached_cache():
cache = Cache(Cache.MEMCACHED, namespace="test", pool_size=1)
yield cache
| true | true |
1c4a624f74d426cc722ed45176fcf417a5aa38db | 2,970 | py | Python | spacy_crfsuite/tokenizer.py | lusterck/spacy_crfsuite | 21acb6431b8c3c98528d6994880ca7bb3b69f499 | [
"MIT"
] | 12 | 2020-07-29T17:08:06.000Z | 2022-03-28T10:39:39.000Z | spacy_crfsuite/tokenizer.py | marzi-heidari/spacy_crfsuite | b9f31aac9e727245791197aed4245f03a57a89ba | [
"MIT"
] | 5 | 2020-07-29T17:08:03.000Z | 2022-03-28T07:16:19.000Z | spacy_crfsuite/tokenizer.py | marzi-heidari/spacy_crfsuite | b9f31aac9e727245791197aed4245f03a57a89ba | [
"MIT"
] | 7 | 2020-08-06T11:08:30.000Z | 2022-01-20T14:25:19.000Z | import numpy as np
import spacy
from abc import ABCMeta, abstractmethod
from typing import Text, Optional, Any, Dict, Union
class Token:
def __init__(
self,
text: Text,
start: int,
end: Optional[int] = None,
data: Optional[Dict[Text, Any]] = None,
lemma: Optional[Text] = None,
) -> None:
self.text = text
self.start = start
self.end = end if end else start + len(text)
self.data = data if data else {}
self.lemma = lemma or text
def set(self, prop: Text, info: Any) -> None:
self.data[prop] = info
def get(self, prop: Text, default: Optional[Any] = None) -> Any:
return self.data.get(prop, default)
def __eq__(self, other):
if not isinstance(other, Token):
return NotImplemented
return (self.start, self.end, self.text, self.lemma) == (
other.start,
other.end,
other.text,
other.lemma,
)
def __lt__(self, other):
if not isinstance(other, Token):
return NotImplemented
return (self.start, self.end, self.text, self.lemma) < (
other.start,
other.end,
other.text,
other.lemma,
)
class Tokenizer:
__metaclass__ = ABCMeta
@abstractmethod
def tokenize(self, message: Dict, attribute: Text = "text") -> None:
raise NotImplementedError("should be implemented by subclass")
class SpacyTokenizer(Tokenizer):
def __init__(self, nlp=None):
self.nlp = nlp or spacy.blank("en")
def tokenize(self, message: Dict, attribute: Text = "text") -> None:
doc = message[attribute]
if attribute == "text":
doc = self.nlp(doc)
tokens = [
Token(
t.text,
t.idx,
lemma=t.lemma_,
data={"pos": self._tag_of_token(t), "shape": t.shape_},
)
for t in doc
]
# Token -> Vec
for token in tokens:
vector = self.get_vector(token)
if vector is not None:
token.set("vector", vector)
# Add CLS token
idx = tokens[-1].end + 1
tokens.append(Token("__CLS__", idx))
message["tokens"] = tokens
def get_vector(self, token: Union[Text, Token]) -> Optional[np.ndarray]:
word_vec = None
if self.nlp.vocab.vectors_length > 0:
word = token.text if isinstance(token, Token) else token
word_hash = self.nlp.vocab.strings[word]
if word_hash in self.nlp.vocab.vectors:
word_vec = self.nlp.vocab.vectors[word_hash]
return word_vec
@staticmethod
def _tag_of_token(token: Any) -> Text:
import spacy
if spacy.about.__version__ > "2" and token._.has("tag"):
return token._.get("tag")
else:
return token.tag_
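# Minimal usage sketch (an illustrative addition; assumes the default blank
# English pipeline created above, so lemmas and POS tags may be empty).
def _tokenizer_demo():
    tokenizer = SpacyTokenizer()
    message = {"text": "hello world"}
    tokenizer.tokenize(message)
    # Two word tokens plus the trailing __CLS__ sentinel.
    return [t.text for t in message["tokens"]]  # ['hello', 'world', '__CLS__']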
| 27.5 | 76 | 0.550168 | import numpy as np
import spacy
from abc import ABCMeta, abstractmethod
from typing import Text, Optional, Any, Dict, Union
class Token:
def __init__(
self,
text: Text,
start: int,
end: Optional[int] = None,
data: Optional[Dict[Text, Any]] = None,
lemma: Optional[Text] = None,
) -> None:
self.text = text
self.start = start
self.end = end if end else start + len(text)
self.data = data if data else {}
self.lemma = lemma or text
def set(self, prop: Text, info: Any) -> None:
self.data[prop] = info
def get(self, prop: Text, default: Optional[Any] = None) -> Any:
return self.data.get(prop, default)
def __eq__(self, other):
if not isinstance(other, Token):
return NotImplemented
return (self.start, self.end, self.text, self.lemma) == (
other.start,
other.end,
other.text,
other.lemma,
)
def __lt__(self, other):
if not isinstance(other, Token):
return NotImplemented
return (self.start, self.end, self.text, self.lemma) < (
other.start,
other.end,
other.text,
other.lemma,
)
class Tokenizer:
__metaclass__ = ABCMeta
@abstractmethod
def tokenize(self, message: Dict, attribute: Text = "text") -> None:
raise NotImplementedError("should be implemented by subclass")
class SpacyTokenizer(Tokenizer):
def __init__(self, nlp=None):
self.nlp = nlp or spacy.blank("en")
def tokenize(self, message: Dict, attribute: Text = "text") -> None:
doc = message[attribute]
if attribute == "text":
doc = self.nlp(doc)
tokens = [
Token(
t.text,
t.idx,
lemma=t.lemma_,
data={"pos": self._tag_of_token(t), "shape": t.shape_},
)
for t in doc
]
for token in tokens:
vector = self.get_vector(token)
if vector is not None:
token.set("vector", vector)
idx = tokens[-1].end + 1
tokens.append(Token("__CLS__", idx))
message["tokens"] = tokens
def get_vector(self, token: Union[Text, Token]) -> Optional[np.ndarray]:
word_vec = None
if self.nlp.vocab.vectors_length > 0:
word = token.text if isinstance(token, Token) else token
word_hash = self.nlp.vocab.strings[word]
if word_hash in self.nlp.vocab.vectors:
word_vec = self.nlp.vocab.vectors[word_hash]
return word_vec
@staticmethod
def _tag_of_token(token: Any) -> Text:
import spacy
if spacy.about.__version__ > "2" and token._.has("tag"):
return token._.get("tag")
else:
return token.tag_
| true | true |