mp_utils.py
#!/usr/bin/env python
'''Utilities for multiprocessing.
@author: Zach Hafen
@contact: [email protected]
@status: Development
'''
import multiprocessing as mp
import os
import pdb
import sys
from types import MethodType
# Python 2/3 compatible copyreg
try:
import copy_reg
except ImportError:
import copyreg as copy_reg
import galaxy_dive.utils.utilities as utilities
########################################################################
def apply_among_processors( f, all_args, n_processors=mp.cpu_count() ):
    '''Break a list of argument tuples into chunks and distribute the chunks among the processors.
    Note: this does not collect return values, so it is unsuitable when you need the results of f.
    It does, however, work with shared-memory objects, unlike Pool or parmap.
Args:
f (function) : The function to apply the args to.
all_args (list) : Args to apply. Format, [ (args1), (args2), ... ]
n_processors (int, optional) : Number of processors to use.
'''
def wrapped_f( args_chunk ):
for args in args_chunk:
f(*args)
chunked_args = utilities.chunk_list( all_args, n_processors )
ps = [ mp.Process( target=wrapped_f, args=(args_chunk,) ) for args_chunk in chunked_args ]
[ p.start() for p in ps ]
[ p.join() for p in ps ]
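# Illustrative usage sketch (not part of the original module). It assumes a
# fork start method (the default on Linux), so the shared mp.Array below is
# inherited by the worker processes; `fill` and `shared` are hypothetical names.
#
#     import multiprocessing as mp
#
#     shared = mp.Array( 'd', 8 )
#
#     def fill( index, value ):
#         shared[index] = value
#
#     apply_among_processors( fill, [ (i, float( i )) for i in range( 8 ) ], n_processors=4 )
#     print( list( shared ) )  # -> [0.0, 1.0, ..., 7.0]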
########################################################################
def mp_queue_to_list( queue, n_processors=mp.cpu_count() ):
    '''Convert a multiprocessing.Queue object to a list, using multiple processors to drain it.
    The resulting list is unordered, and the conversion may misbehave if the queue's items are themselves lists.
Args:
queue (mp.Queue) : The queue to turn into a list.
n_processors (int) : Number of processors to use.
'''
def process_queue( q, l ):
while True:
l.acquire()
if q.qsize() > 1:
popped1 = q.get()
popped2 = q.get()
l.release()
else:
l.release()
break
if not isinstance( popped1, list ):
popped1 = [ popped1, ]
if not isinstance( popped2, list ):
popped2 = [ popped2, ]
q.put( popped1 + popped2 )
lock = mp.Lock()
proc = [ mp.Process( target=process_queue, args=(queue,lock) ) for _ in range( n_processors ) ]
for p in proc:
p.daemon = True
p.start()
[ p.join() for p in proc ]
return queue.get()
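# Illustrative usage sketch (not part of the original module). It assumes a
# fork start method and a platform where Queue.qsize() is implemented (e.g. Linux);
# the result is a single, unordered list of everything that was in the queue.
#
#     import multiprocessing as mp
#
#     q = mp.Queue()
#     for i in range( 10 ):
#         q.put( i )
#
#     values = mp_queue_to_list( q, n_processors=2 )
#     print( sorted( values ) )  # -> [0, 1, ..., 9]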
########################################################################
'''The following is a version of Pool, written with classes in mind. It does not handle shared memory objects well.
https://stackoverflow.com/a/16071616
'''
def fun( f, q_in, q_out ):
while True:
i, x = q_in.get()
if i is None:
print( "PID {} finishing, PPID {}.".format( os.getpid(), os.getppid() ) )
break
q_out.put( (i, f( x )) )
def set_fun( f, q_in, q_out ):
res_proc = set()
while True:
i, x = q_in.get()
if i is None:
print( "PID {} finishing, PPID {}.".format( os.getpid(), os.getppid() ) )
q_out.put( res_proc )
break
res_proc = res_proc | f( x )
def parmap( f, X, n_processors=mp.cpu_count(), return_values=True, set_case=False, use_mp_queue_to_list=False ):
    '''Parallel map that, unlike multiprocessing.Pool.map, also works with bound methods of classes.
Args:
f (function) : Function to map to.
X (list) : List of arguments to provide f
n_processors (int) : Number of processors to use.
return_values (bool) : If False, don't bother getting the results from the functions.
set_case (bool) : If this option is True, it assumes that f returns a set, and that results should be the
union of all those sets.
use_mp_queue_to_list (bool) : Experimental. If True, try to use mp_queue_to_list to convert the list.
Only works if set_case, currently.
Returns:
results (list or set) : The results.
'''
m = mp.Manager()
q_in = m.Queue(1)
q_out = m.Queue()
if set_case:
target_fun = set_fun
else:
target_fun = fun
proc = [ mp.Process( target=target_fun, args=(f, q_in, q_out) )
for _ in range( n_processors ) ]
for p in proc:
p.daemon = True
p.start()
sent = [ q_in.put( (i, x) ) for i, x in enumerate( X ) ]
[ q_in.put( (None, None) ) for _ in range( n_processors ) ]
print( "Getting results from queue. This could take a while..." )
# Store the results
if return_values:
if set_case:
if use_mp_queue_to_list:
res = mp_queue_to_list( q_out, n_processors )
else:
res = [ q_out.get() for _ in range( n_processors ) ]
[ p.join() for p in proc ]
return res
else:
res = [ q_out.get() for _ in range( len( sent ) ) ]
[ p.join() for p in proc ]
return [ x for i, x in sorted( res ) ]
else:
[ p.join() for p in proc ]
return
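# Illustrative usage sketch (not part of the original module), showing the main
# use case: mapping a bound method, which a plain multiprocessing.Pool cannot
# always handle. Assumes a fork start method; `Squarer` is a hypothetical class.
#
#     class Squarer( object ):
#
#         def __init__( self, offset ):
#             self.offset = offset
#
#         def square( self, x ):
#             return x * x + self.offset
#
#     squarer = Squarer( offset=1 )
#     results = parmap( squarer.square, range( 5 ), n_processors=2 )
#     print( results )  # -> [1, 2, 5, 10, 17]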
########################################################################
'''This section contains efforts to make classes pickleable, allowing multiprocessing.Pool to be used.'''
def _pickle_method(method):
'''The majority of this was taken from the following StackOverflow answer:
http://stackoverflow.com/questions/1816958/cant-pickle-type-instancemethod-when-using-pythons-multiprocessing-pool-ma/7309686#7309686
'''
    # __func__/__self__ exist on bound methods in both Python 2.6+ and Python 3
    # (im_func/im_self are Python 2 only).
    func_name = method.__func__.__name__
    obj = method.__self__
    cls = obj.__class__
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
'''The majority of this was taken from the following StackOverflow answer:
http://stackoverflow.com/questions/1816958/cant-pickle-type-instancemethod-when-using-pythons-multiprocessing-pool-ma/7309686#7309686
'''
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
def make_classes_picklable():
'''The majority of this was taken from the following StackOverflow answer:
http://stackoverflow.com/questions/1816958/cant-pickle-type-instancemethod-when-using-pythons-multiprocessing-pool-ma/7309686#7309686
'''
copy_reg.pickle(MethodType, _pickle_method, _unpickle_method)
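# Illustrative usage sketch (not part of the original module). Registering the
# reducer matters mainly on Python 2, where bound methods are not picklable by
# default; `Doubler` is a hypothetical class defined at module level so that
# Pool workers can find it when unpickling.
#
#     make_classes_picklable()
#
#     class Doubler( object ):
#
#         def double( self, x ):
#             return 2 * x
#
#     pool = mp.Pool( processes=2 )
#     print( pool.map( Doubler().double, range( 4 ) ) )  # -> [0, 2, 4, 6]
#     pool.close()
#     pool.join()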
########################################################################
class ForkedPdb(pdb.Pdb):
"""A Pdb subclass that may be used
    from a forked multiprocessing child.
From https://stackoverflow.com/a/23654936
"""
def interaction(self, *args, **kwargs):
_stdin = sys.stdin
try:
sys.stdin = open('/dev/stdin')
pdb.Pdb.interaction(self, *args, **kwargs)
finally:
sys.stdin = _stdin
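# Illustrative usage sketch (not part of the original module): drop into an
# interactive debugger from inside a forked child, where a plain pdb.set_trace()
# would not have a usable stdin. `child` is a hypothetical target function.
#
#     def child():
#         ForkedPdb().set_trace()  # prompt is attached to /dev/stdin
#         print( "resumed" )
#
#     p = mp.Process( target=child )
#     p.start()
#     p.join()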
util.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import collections
import struct
import sys
from threading import Thread, Event
import six
from monasca_common.kafka_lib.common import BufferUnderflowError
def crc32(data):
return binascii.crc32(data) & 0xffffffff
def write_int_string(s):
if s is not None and not isinstance(s, six.binary_type):
raise TypeError('Expected "%s" to be bytes\n'
'data=%s' % (type(s), repr(s)))
if s is None:
return struct.pack('>i', -1)
else:
return struct.pack('>i%ds' % len(s), len(s), s)
def write_short_string(s):
if s is not None and not isinstance(s, six.binary_type):
raise TypeError('Expected "%s" to be bytes\n'
'data=%s' % (type(s), repr(s)))
if s is None:
return struct.pack('>h', -1)
elif len(s) > 32767 and sys.version_info < (2, 7):
# Python 2.6 issues a deprecation warning instead of a struct error
raise struct.error(len(s))
else:
return struct.pack('>h%ds' % len(s), len(s), s)
def read_short_string(data, cur):
if len(data) < cur + 2:
raise BufferUnderflowError("Not enough data left")
(strlen,) = struct.unpack('>h', data[cur:cur + 2])
if strlen == -1:
return None, cur + 2
cur += 2
if len(data) < cur + strlen:
raise BufferUnderflowError("Not enough data left")
out = data[cur:cur + strlen]
return out, cur + strlen
def read_int_string(data, cur):
if len(data) < cur + 4:
raise BufferUnderflowError(
"Not enough data left to read string len (%d < %d)" %
(len(data), cur + 4))
(strlen,) = struct.unpack('>i', data[cur:cur + 4])
if strlen == -1:
return None, cur + 4
cur += 4
if len(data) < cur + strlen:
raise BufferUnderflowError("Not enough data left")
out = data[cur:cur + strlen]
return out, cur + strlen
def relative_unpack(fmt, data, cur):
size = struct.calcsize(fmt)
if len(data) < cur + size:
raise BufferUnderflowError("Not enough data left")
out = struct.unpack(fmt, data[cur:cur + size])
return out, cur + size
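# Illustrative round-trip sketch (not part of the original module) for the
# length-prefixed string helpers and relative_unpack; the offsets shown follow
# from the '>i'/'>h' length prefixes.
#
#     payload = write_int_string(b'hello') + write_short_string(b'kafka')
#     value, cur = read_int_string(payload, 0)      # b'hello', cur == 9
#     short, cur = read_short_string(payload, cur)  # b'kafka', cur == 16
#     (length,), _ = relative_unpack('>i', payload, 0)  # length == 5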
def group_by_topic_and_partition(tuples):
out = collections.defaultdict(dict)
for t in tuples:
assert t.topic not in out or t.partition not in out[t.topic], \
'Duplicate {0}s for {1} {2}'.format(t.__class__.__name__, t.topic, t.partition)
out[t.topic][t.partition] = t
return out
def kafka_bytestring(s):
"""
Takes a string or bytes instance
Returns bytes, encoding strings in utf-8 as necessary
"""
if isinstance(s, six.binary_type):
return s
if isinstance(s, six.string_types):
return s.encode('utf-8')
raise TypeError(s)
class ReentrantTimer(object):
"""
A timer that can be restarted, unlike threading.Timer
    (implemented with a Thread and an Event rather than threading.Timer)
Arguments:
t: timer interval in milliseconds
fn: a callable to invoke
args: tuple of args to be passed to function
kwargs: keyword arguments to be passed to function
"""
def __init__(self, t, fn, *args, **kwargs):
if t <= 0:
raise ValueError('Invalid timeout value')
if not callable(fn):
raise ValueError('fn must be callable')
self.thread = None
self.t = t / 1000.0
self.fn = fn
self.args = args
self.kwargs = kwargs
self.active = None
def _timer(self, active):
# python2.6 Event.wait() always returns None
# python2.7 and greater returns the flag value (true/false)
# we want the flag value, so add an 'or' here for python2.6
# this is redundant for later python versions (FLAG OR FLAG == FLAG)
while not (active.wait(self.t) or active.is_set()):
self.fn(*self.args, **self.kwargs)
def start(self):
if self.thread is not None:
self.stop()
self.active = Event()
self.thread = Thread(target=self._timer, args=(self.active,))
self.thread.daemon = True # So the app exits when main thread exits
self.thread.start()
def stop(self):
if self.thread is None:
return
self.active.set()
self.thread.join(self.t + 1)
        self.thread = None
        self.fn = None
def __del__(self):
self.stop()
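# Illustrative usage sketch (not part of the original module): invoke a callback
# every 500 ms until stop() is called. Note the interval argument is in
# milliseconds; `tick` is a hypothetical callback.
#
#     import time
#
#     def tick():
#         print("tick")
#
#     timer = ReentrantTimer(500, tick)
#     timer.start()
#     time.sleep(2)   # roughly four ticks
#     timer.stop()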
load-data.py
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This script is used to load the proper datasets for the specified workloads. It loads
# all data via Hive except for Parquet data, which needs to be loaded via Impala.
# Most DDL commands are executed by Impala.
import collections
import getpass
import logging
import os
import re
import sqlparse
import subprocess
import sys
import tempfile
import time
import traceback
from itertools import product
from optparse import OptionParser
from Queue import Queue
from tests.beeswax.impala_beeswax import *
from threading import Thread
logging.basicConfig()
LOG = logging.getLogger('load-data.py')
LOG.setLevel(logging.DEBUG)
parser = OptionParser()
parser.add_option("-e", "--exploration_strategy", dest="exploration_strategy",
default="core",
help="The exploration strategy for schema gen: 'core', "\
"'pairwise', or 'exhaustive'")
parser.add_option("--hive_warehouse_dir", dest="hive_warehouse_dir",
default="/test-warehouse",
help="The HDFS path to the base Hive test warehouse directory")
parser.add_option("-w", "--workloads", dest="workloads",
help="Comma-separated list of workloads to load data for. If 'all' is "\
"specified then data for all workloads is loaded.")
parser.add_option("-s", "--scale_factor", dest="scale_factor", default="",
help="An optional scale factor to generate the schema for")
parser.add_option("-f", "--force_reload", dest="force_reload", action="store_true",
default=False, help='Skips HDFS exists check and reloads all tables')
parser.add_option("--impalad", dest="impalad", default="localhost:21000",
help="Impala daemon to connect to")
parser.add_option("--hive_hs2_hostport", dest="hive_hs2_hostport",
default="localhost:11050",
help="HS2 host:Port to issue Hive queries against using beeline")
parser.add_option("--table_names", dest="table_names", default=None,
help="Only load the specified tables - specified as a comma-seperated "\
"list of base table names")
parser.add_option("--table_formats", dest="table_formats", default=None,
help="Override the test vectors and load using the specified table "\
"formats. Ex. --table_formats=seq/snap/block,text/none")
parser.add_option("--hdfs_namenode", dest="hdfs_namenode", default="localhost:20500",
help="HDFS name node for Avro schema URLs, default localhost:20500")
parser.add_option("--workload_dir", dest="workload_dir",
default=os.environ['IMPALA_WORKLOAD_DIR'],
help="Directory that contains Impala workloads")
parser.add_option("--dataset_dir", dest="dataset_dir",
default=os.environ['IMPALA_DATASET_DIR'],
help="Directory that contains Impala datasets")
parser.add_option("--use_kerberos", action="store_true", default=False,
help="Load data on a kerberized cluster.")
parser.add_option("--principal", default=None, dest="principal",
help="Kerberos service principal, required if --use_kerberos is set")
options, args = parser.parse_args()
SQL_OUTPUT_DIR = os.environ['IMPALA_DATA_LOADING_SQL_DIR']
WORKLOAD_DIR = options.workload_dir
DATASET_DIR = options.dataset_dir
TESTDATA_BIN_DIR = os.path.join(os.environ['IMPALA_HOME'], 'testdata/bin')
AVRO_SCHEMA_DIR = "avro_schemas"
GENERATE_SCHEMA_CMD = "generate-schema-statements.py --exploration_strategy=%s "\
"--workload=%s --scale_factor=%s --verbose"
# Load data using Hive's beeline because the Hive shell has regressed (HIVE-5515).
# The Hive shell is stateful, meaning that certain series of actions lead to problems.
# Examples of problems due to the statefulness of the Hive shell:
# - Creating an HBase table changes the replication factor to 1 for subsequent LOADs.
# - INSERTs into an HBase table fail if they are the first stmt executed in a session.
# However, beeline itself also has bugs. For example, inserting a NULL literal into
# a string-typed column leads to an NPE. We work around these problems by using LOAD from
# a datafile instead of doing INSERTs.
HIVE_CMD = os.path.join(os.environ['HIVE_HOME'], 'bin/beeline')
hive_auth = "auth=none"
if options.use_kerberos:
if not options.principal:
print "--principal is required when --use_kerberos is specified"
exit(1)
hive_auth = "principal=" + options.principal
HIVE_ARGS = '-n %s -u "jdbc:hive2://%s/default;%s" --verbose=true'\
% (getpass.getuser(), options.hive_hs2_hostport, hive_auth)
# When HiveServer2 is configured to use "local" mode (i.e., MR jobs are run
# in-process rather than on YARN), Hadoop's LocalDistributedCacheManager has a
# race, wherein it tries to localize jars into
# /tmp/hadoop-$USER/mapred/local/<millis>. Two simultaneous Hive queries
# against HS2 can conflict here. Weirdly, LocalJobRunner handles a similar issue
# (with the staging directory) by appending a random number. To overcome this,
# in the case that HS2 is on the local machine (which we conflate with also
# running MR jobs locally), we move the temporary directory into a unique
# directory via configuration. This block can be removed when
# https://issues.apache.org/jira/browse/MAPREDUCE-6441 is resolved.
# A similar workaround is used in tests/common/impala_test_suite.py.
if options.hive_hs2_hostport.startswith("localhost:"):
HIVE_ARGS += ' --hiveconf "mapreduce.cluster.local.dir=%s"' % (tempfile.mkdtemp(
prefix="impala-data-load-"))
HADOOP_CMD = os.path.join(os.environ['HADOOP_HOME'], 'bin/hadoop')
def available_workloads(workload_dir):
return [subdir for subdir in os.listdir(workload_dir)
if os.path.isdir(os.path.join(workload_dir, subdir))]
def validate_workloads(all_workloads, workloads):
for workload in workloads:
if workload not in all_workloads:
print 'Workload \'%s\' not found in workload directory' % workload
print 'Available workloads: ' + ', '.join(all_workloads)
sys.exit(1)
def exec_cmd(cmd, error_msg, exit_on_error=True):
ret_val = -1
try:
ret_val = subprocess.call(cmd, shell=True)
except Exception as e:
error_msg = "%s: %s" % (error_msg, str(e))
finally:
if ret_val != 0:
print error_msg
if exit_on_error: sys.exit(ret_val)
return ret_val
def exec_hive_query_from_file(file_name):
if not os.path.exists(file_name): return
hive_cmd = "%s %s -f %s" % (HIVE_CMD, HIVE_ARGS, file_name)
print 'Executing Hive Command: %s' % hive_cmd
exec_cmd(hive_cmd, 'Error executing file from Hive: ' + file_name)
def exec_hbase_query_from_file(file_name):
if not os.path.exists(file_name): return
hbase_cmd = "hbase shell %s" % file_name
print 'Executing HBase Command: %s' % hbase_cmd
exec_cmd(hbase_cmd, 'Error executing hbase create commands')
# KERBEROS TODO: fails when kerberized and impalad principal isn't "impala"
def exec_impala_query_from_file(file_name):
"""Execute each query in an Impala query file individually"""
is_success = True
impala_client = ImpalaBeeswaxClient(options.impalad, use_kerberos=options.use_kerberos)
try:
impala_client.connect()
with open(file_name, 'r+') as query_file:
queries = sqlparse.split(query_file.read())
for query in queries:
query = sqlparse.format(query.rstrip(';'), strip_comments=True)
print '(%s):\n%s\n' % (file_name, query.strip())
if query.strip() != "":
result = impala_client.execute(query)
except Exception as e:
print "Data Loading from Impala failed with error: %s" % str(e)
traceback.print_exc()
is_success = False
finally:
impala_client.close_connection()
return is_success
def exec_bash_script(file_name):
bash_cmd = "bash %s" % file_name
print 'Executing Bash Command: ' + bash_cmd
  exec_cmd(bash_cmd, 'Error executing bash script: ' + file_name)
def run_dataset_preload(dataset):
"""Execute a preload script if present in dataset directory. E.g. to generate data
before loading"""
dataset_preload_script = os.path.join(DATASET_DIR, dataset, "preload")
if os.path.exists(dataset_preload_script):
print("Running preload script for " + dataset)
if options.scale_factor > 1:
dataset_preload_script += " " + str(options.scale_factor)
exec_cmd(dataset_preload_script, "Error executing preload script for " + dataset,
exit_on_error=True)
def generate_schema_statements(workload):
generate_cmd = GENERATE_SCHEMA_CMD % (options.exploration_strategy, workload,
options.scale_factor)
if options.table_names:
generate_cmd += " --table_names=%s" % options.table_names
if options.force_reload:
generate_cmd += " --force_reload"
if options.table_formats:
generate_cmd += " --table_formats=%s" % options.table_formats
if options.hive_warehouse_dir is not None:
generate_cmd += " --hive_warehouse_dir=%s" % options.hive_warehouse_dir
if options.hdfs_namenode is not None:
generate_cmd += " --hdfs_namenode=%s" % options.hdfs_namenode
generate_cmd += " --backend=%s" % options.impalad
print 'Executing Generate Schema Command: ' + generate_cmd
schema_cmd = os.path.join(TESTDATA_BIN_DIR, generate_cmd)
error_msg = 'Error generating schema statements for workload: ' + workload
exec_cmd(schema_cmd, error_msg)
def get_dataset_for_workload(workload):
dimension_file_name = os.path.join(WORKLOAD_DIR, workload,
'%s_dimensions.csv' % workload)
if not os.path.isfile(dimension_file_name):
print 'Dimension file not found: ' + dimension_file_name
sys.exit(1)
with open(dimension_file_name, 'rb') as input_file:
match = re.search('dataset:\s*([\w\-\.]+)', input_file.read())
if match:
return match.group(1)
else:
print 'Dimension file does not contain dataset for workload \'%s\'' % (workload)
sys.exit(1)
def copy_avro_schemas_to_hdfs(schemas_dir):
"""Recursively copies all of schemas_dir to the test warehouse."""
if not os.path.exists(schemas_dir):
print 'Avro schema dir (%s) does not exist. Skipping copy to HDFS.' % schemas_dir
return
exec_hadoop_fs_cmd("-mkdir -p " + options.hive_warehouse_dir)
exec_hadoop_fs_cmd("-put -f %s %s/" % (schemas_dir, options.hive_warehouse_dir))
def exec_hadoop_fs_cmd(args, exit_on_error=True):
cmd = "%s fs %s" % (HADOOP_CMD, args)
print "Executing Hadoop command: " + cmd
exec_cmd(cmd, "Error executing Hadoop command, exiting",
exit_on_error=exit_on_error)
def exec_impala_query_from_file_parallel(query_files):
# Get the name of the query file that loads the base tables, if it exists.
# TODO: Find a better way to detect the file that loads the base tables.
create_base_table_file = next((q for q in query_files if 'text' in q), None)
if create_base_table_file:
is_success = exec_impala_query_from_file(create_base_table_file)
query_files.remove(create_base_table_file)
# If loading the base tables failed, exit with a non zero error code.
if not is_success: sys.exit(1)
if not query_files: return
threads = []
result_queue = Queue()
for query_file in query_files:
thread = Thread(target=lambda x: result_queue.put(exec_impala_query_from_file(x)),
args=[query_file])
thread.daemon = True
threads.append(thread)
thread.start()
# Keep looping until the number of results retrieved is the same as the number of
# threads spawned, or until a data loading query fails. result_queue.get() will
# block until a result is available in the queue.
num_fetched_results = 0
while num_fetched_results < len(threads):
success = result_queue.get()
num_fetched_results += 1
if not success: sys.exit(1)
# There is a small window where a thread may still be alive even if all the threads have
# finished putting their results in the queue.
for thread in threads: thread.join()
if __name__ == "__main__":
# Having the actual command line at the top of each data-load-* log can help
# when debugging dataload issues.
#
LOG.debug(' '.join(sys.argv))
all_workloads = available_workloads(WORKLOAD_DIR)
workloads = []
if options.workloads is None:
print "At least one workload name must be specified."
parser.print_help()
sys.exit(1)
elif options.workloads == 'all':
print 'Loading data for all workloads.'
workloads = all_workloads
else:
workloads = options.workloads.split(",")
validate_workloads(all_workloads, workloads)
print 'Starting data load for the following workloads: ' + ', '.join(workloads)
loading_time_map = collections.defaultdict(float)
for workload in workloads:
start_time = time.time()
dataset = get_dataset_for_workload(workload)
run_dataset_preload(dataset)
generate_schema_statements(workload)
sql_dir = os.path.join(SQL_OUTPUT_DIR, dataset)
assert os.path.isdir(sql_dir),\
("Could not find the generated SQL files for loading dataset '%s'.\
\nExpected to find the SQL files in: %s" % (dataset, sql_dir))
os.chdir(os.path.join(SQL_OUTPUT_DIR, dataset))
copy_avro_schemas_to_hdfs(AVRO_SCHEMA_DIR)
dataset_dir_contents = os.listdir(os.getcwd())
load_file_substr = "%s-%s" % (workload, options.exploration_strategy)
    # Data loading with Impala is done in parallel; each file format has a separate
    # query file.
create_filename = 'create-%s-impala-generated' % load_file_substr
load_filename = 'load-%s-impala-generated' % load_file_substr
impala_create_files = [f for f in dataset_dir_contents if create_filename in f]
impala_load_files = [f for f in dataset_dir_contents if load_filename in f]
# Execute the data loading scripts.
# Creating tables in Impala has no dependencies, so we execute them first.
    # HBase table inserts are done via Hive, so the HBase tables need to be created
    # before running the Hive script. Some of the Impala inserts depend on Hive tables,
    # so they're done at the end. Finally, the HBase tables that have been filled with
    # data need to be flushed.
exec_impala_query_from_file_parallel(impala_create_files)
exec_hbase_query_from_file('load-%s-hbase-generated.create' % load_file_substr)
exec_hive_query_from_file('load-%s-hive-generated.sql' % load_file_substr)
exec_hbase_query_from_file('post-load-%s-hbase-generated.sql' % load_file_substr)
# Invalidate so that Impala sees the loads done by Hive before loading Parquet/Kudu
# Note: This only invalidates tables for this workload.
invalidate_sql_file = 'invalidate-{0}-impala-generated.sql'.format(load_file_substr)
if impala_load_files: exec_impala_query_from_file(invalidate_sql_file)
exec_impala_query_from_file_parallel(impala_load_files)
# Final invalidate for this workload
exec_impala_query_from_file(invalidate_sql_file)
loading_time_map[workload] = time.time() - start_time
total_time = 0.0
for workload, load_time in loading_time_map.iteritems():
total_time += load_time
print 'Data loading for workload \'%s\' completed in: %.2fs'\
% (workload, load_time)
print 'Total load time: %.2fs\n' % total_time
worker_test.py
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import email.parser
import functools
import logging
import os
import shutil
import signal
import tempfile
import threading
import time
import psutil
from helpers import (unittest, with_config, skipOnTravis, LuigiTestCase,
temporary_unloaded_module)
import luigi.notifications
import luigi.task_register
import luigi.worker
import mock
from luigi import ExternalTask, RemoteScheduler, Task, Event
from luigi.mock import MockTarget, MockFileSystem
from luigi.scheduler import Scheduler
from luigi.worker import Worker
from luigi.rpc import RPCError
from luigi.cmdline import luigi_run
luigi.notifications.DEBUG = True
class DummyTask(Task):
def __init__(self, *args, **kwargs):
super(DummyTask, self).__init__(*args, **kwargs)
self.has_run = False
def complete(self):
return self.has_run
def run(self):
logging.debug("%s - setting has_run", self)
self.has_run = True
class DynamicDummyTask(Task):
p = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.p)
def run(self):
with self.output().open('w') as f:
f.write('Done!')
time.sleep(0.5) # so we can benchmark & see if parallelization works
class DynamicDummyTaskWithNamespace(DynamicDummyTask):
task_namespace = 'banana'
class DynamicRequires(Task):
p = luigi.Parameter()
use_banana_task = luigi.BoolParameter(default=False)
def output(self):
return luigi.LocalTarget(os.path.join(self.p, 'parent'))
def run(self):
if self.use_banana_task:
task_cls = DynamicDummyTaskWithNamespace
else:
task_cls = DynamicDummyTask
dummy_targets = yield [task_cls(os.path.join(self.p, str(i)))
for i in range(5)]
dummy_targets += yield [task_cls(os.path.join(self.p, str(i)))
for i in range(5, 7)]
with self.output().open('w') as f:
for i, d in enumerate(dummy_targets):
for line in d.open('r'):
print('%d: %s' % (i, line.strip()), file=f)
class DynamicRequiresOtherModule(Task):
p = luigi.Parameter()
def output(self):
return luigi.LocalTarget(os.path.join(self.p, 'baz'))
def run(self):
import other_module
other_target_foo = yield other_module.OtherModuleTask(os.path.join(self.p, 'foo')) # NOQA
other_target_bar = yield other_module.OtherModuleTask(os.path.join(self.p, 'bar')) # NOQA
with self.output().open('w') as f:
f.write('Done!')
class DummyErrorTask(Task):
retry_index = 0
def run(self):
self.retry_index += 1
raise Exception("Retry index is %s for %s" % (self.retry_index, self.task_family))
class WorkerTest(LuigiTestCase):
def run(self, result=None):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10, stable_done_cooldown_secs=0)
self.time = time.time
with Worker(scheduler=self.sch, worker_id='X') as w, Worker(scheduler=self.sch, worker_id='Y') as w2:
self.w = w
self.w2 = w2
super(WorkerTest, self).run(result)
if time.time != self.time:
time.time = self.time
def setTime(self, t):
time.time = lambda: t
def test_dep(self):
class A(Task):
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertTrue(a.has_run)
self.assertTrue(b.has_run)
def test_external_dep(self):
class A(ExternalTask):
def complete(self):
return False
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_externalized_dep(self):
class A(Task):
has_run = False
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
class B(A):
def requires(self):
return luigi.task.externalize(a)
b = B()
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_legacy_externalized_dep(self):
class A(Task):
has_run = False
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
a.run = NotImplemented
class B(A):
def requires(self):
return a
b = B()
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_type_error_in_tracking_run_deprecated(self):
class A(Task):
num_runs = 0
def complete(self):
return False
def run(self, tracking_url_callback=None):
self.num_runs += 1
raise TypeError('bad type')
a = A()
self.assertTrue(self.w.add(a))
self.assertFalse(self.w.run())
# Should only run and fail once, not retry because of the type error
self.assertEqual(1, a.num_runs)
def test_tracking_url(self):
tracking_url = 'http://test_url.com/'
class A(Task):
has_run = False
def complete(self):
return self.has_run
def run(self):
self.set_tracking_url(tracking_url)
self.has_run = True
a = A()
self.assertTrue(self.w.add(a))
self.assertTrue(self.w.run())
tasks = self.sch.task_list('DONE', '')
self.assertEqual(1, len(tasks))
self.assertEqual(tracking_url, tasks[a.task_id]['tracking_url'])
def test_fail(self):
class CustomException(BaseException):
def __init__(self, msg):
self.msg = msg
class A(Task):
def run(self):
self.has_run = True
raise CustomException('bad things')
def complete(self):
return self.has_run
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertFalse(self.w.run())
self.assertTrue(a.has_run)
self.assertFalse(b.has_run)
def test_unknown_dep(self):
# see related test_remove_dep test (grep for it)
class A(ExternalTask):
def complete(self):
return False
class C(Task):
def complete(self):
return True
def get_b(dep):
class B(Task):
def requires(self):
return dep
def run(self):
self.has_run = True
def complete(self):
return False
b = B()
b.has_run = False
return b
b_a = get_b(A())
b_c = get_b(C())
self.assertTrue(self.w.add(b_a))
# So now another worker goes in and schedules C -> B
# This should remove the dep A -> B but will screw up the first worker
self.assertTrue(self.w2.add(b_c))
self.assertFalse(self.w.run()) # should not run anything - the worker should detect that A is broken
self.assertFalse(b_a.has_run)
# not sure what should happen??
# self.w2.run() # should run B since C is fulfilled
# self.assertTrue(b_c.has_run)
def test_unfulfilled_dep(self):
class A(Task):
def complete(self):
return self.done
def run(self):
self.done = True
def get_b(a):
class B(A):
def requires(self):
return a
b = B()
b.done = False
a.done = True
return b
a = A()
b = get_b(a)
self.assertTrue(self.w.add(b))
a.done = False
self.w.run()
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_check_unfulfilled_deps_config(self):
class A(Task):
i = luigi.IntParameter()
def __init__(self, *args, **kwargs):
super(A, self).__init__(*args, **kwargs)
self.complete_count = 0
self.has_run = False
def complete(self):
self.complete_count += 1
return self.has_run
def run(self):
self.has_run = True
class B(A):
def requires(self):
return A(i=self.i)
# test the enabled features
with Worker(scheduler=self.sch, worker_id='1') as w:
w._config.check_unfulfilled_deps = True
a1 = A(i=1)
b1 = B(i=1)
self.assertTrue(w.add(b1))
self.assertEqual(a1.complete_count, 1)
self.assertEqual(b1.complete_count, 1)
w.run()
self.assertTrue(a1.complete())
self.assertTrue(b1.complete())
self.assertEqual(a1.complete_count, 3)
self.assertEqual(b1.complete_count, 2)
# test the disabled features
with Worker(scheduler=self.sch, worker_id='2') as w:
w._config.check_unfulfilled_deps = False
a2 = A(i=2)
b2 = B(i=2)
self.assertTrue(w.add(b2))
self.assertEqual(a2.complete_count, 1)
self.assertEqual(b2.complete_count, 1)
w.run()
self.assertTrue(a2.complete())
self.assertTrue(b2.complete())
self.assertEqual(a2.complete_count, 2)
self.assertEqual(b2.complete_count, 2)
def test_gets_missed_work(self):
class A(Task):
done = False
def complete(self):
return self.done
def run(self):
self.done = True
a = A()
self.assertTrue(self.w.add(a))
# simulate a missed get_work response
self.assertEqual(a.task_id, self.sch.get_work(worker='X')['task_id'])
self.assertTrue(self.w.run())
self.assertTrue(a.complete())
def test_avoid_infinite_reschedule(self):
class A(Task):
def complete(self):
return False
class B(Task):
def complete(self):
return False
def requires(self):
return A()
self.assertTrue(self.w.add(B()))
self.assertFalse(self.w.run())
def test_fails_registering_signal(self):
with mock.patch('luigi.worker.signal', spec=['signal']):
# mock will raise an attribute error getting signal.SIGUSR1
Worker()
def test_allow_reschedule_with_many_missing_deps(self):
class A(Task):
""" Task that must run twice to succeed """
i = luigi.IntParameter()
runs = 0
def complete(self):
return self.runs >= 2
def run(self):
self.runs += 1
class B(Task):
done = False
def requires(self):
return map(A, range(20))
def complete(self):
return self.done
def run(self):
self.done = True
b = B()
w = Worker(scheduler=self.sch, worker_id='X', max_reschedules=1)
self.assertTrue(w.add(b))
self.assertFalse(w.run())
# For b to be done, we must have rescheduled its dependencies to run them twice
self.assertTrue(b.complete())
self.assertTrue(all(a.complete() for a in b.deps()))
def test_interleaved_workers(self):
class A(DummyTask):
pass
a = A()
class B(DummyTask):
def requires(self):
return a
ExternalB = luigi.task.externalize(B)
b = B()
eb = ExternalB()
self.assertEqual(str(eb), "B()")
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X') as w, Worker(scheduler=sch, worker_id='Y') as w2:
self.assertTrue(w.add(b))
self.assertTrue(w2.add(eb))
logging.debug("RUNNING BROKEN WORKER")
self.assertTrue(w2.run())
self.assertFalse(a.complete())
self.assertFalse(b.complete())
logging.debug("RUNNING FUNCTIONAL WORKER")
self.assertTrue(w.run())
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_interleaved_workers2(self):
# two tasks without dependencies, one external, one not
class B(DummyTask):
pass
ExternalB = luigi.task.externalize(B)
b = B()
eb = ExternalB()
self.assertEqual(str(eb), "B()")
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X') as w, Worker(scheduler=sch, worker_id='Y') as w2:
self.assertTrue(w2.add(eb))
self.assertTrue(w.add(b))
self.assertTrue(w2.run())
self.assertFalse(b.complete())
self.assertTrue(w.run())
self.assertTrue(b.complete())
def test_interleaved_workers3(self):
class A(DummyTask):
def run(self):
logging.debug('running A')
time.sleep(0.1)
super(A, self).run()
a = A()
class B(DummyTask):
def requires(self):
return a
def run(self):
logging.debug('running B')
super(B, self).run()
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X', keep_alive=True, count_uniques=True) as w:
with Worker(scheduler=sch, worker_id='Y', keep_alive=True, count_uniques=True, wait_interval=0.1, wait_jitter=0.05) as w2:
self.assertTrue(w.add(a))
self.assertTrue(w2.add(b))
threading.Thread(target=w.run).start()
self.assertTrue(w2.run())
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_die_for_non_unique_pending(self):
class A(DummyTask):
def run(self):
logging.debug('running A')
time.sleep(0.1)
super(A, self).run()
a = A()
class B(DummyTask):
def requires(self):
return a
def run(self):
logging.debug('running B')
super(B, self).run()
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X', keep_alive=True, count_uniques=True) as w:
with Worker(scheduler=sch, worker_id='Y', keep_alive=True, count_uniques=True, wait_interval=0.1, wait_jitter=0.05) as w2:
self.assertTrue(w.add(b))
self.assertTrue(w2.add(b))
self.assertEqual(w._get_work()[0], a.task_id)
self.assertTrue(w2.run())
self.assertFalse(a.complete())
self.assertFalse(b.complete())
def test_complete_exception(self):
"Tests that a task is still scheduled if its sister task crashes in the complete() method"
class A(DummyTask):
def complete(self):
raise Exception("doh")
a = A()
class C(DummyTask):
pass
c = C()
class B(DummyTask):
def requires(self):
return a, c
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as w:
self.assertFalse(w.add(b))
self.assertTrue(w.run())
self.assertFalse(b.has_run)
self.assertTrue(c.has_run)
self.assertFalse(a.has_run)
def test_requires_exception(self):
class A(DummyTask):
def requires(self):
raise Exception("doh")
a = A()
class D(DummyTask):
pass
d = D()
class C(DummyTask):
def requires(self):
return d
c = C()
class B(DummyTask):
def requires(self):
return c, a
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as w:
self.assertFalse(w.add(b))
self.assertTrue(w.run())
self.assertFalse(b.has_run)
self.assertTrue(c.has_run)
self.assertTrue(d.has_run)
self.assertFalse(a.has_run)
def test_run_csv_batch_job(self):
completed = set()
class CsvBatchJob(luigi.Task):
values = luigi.parameter.Parameter(batch_method=','.join)
has_run = False
def run(self):
completed.update(self.values.split(','))
self.has_run = True
def complete(self):
return all(value in completed for value in self.values.split(','))
tasks = [CsvBatchJob(str(i)) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertFalse(task.has_run)
def test_run_max_batch_job(self):
completed = set()
class MaxBatchJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
def run(self):
completed.add(self.value)
self.has_run = True
def complete(self):
return any(self.value <= ran for ran in completed)
tasks = [MaxBatchJob(i) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
# only task number 9 should run
self.assertFalse(task.has_run and task.value < 9)
def test_run_batch_job_unbatched(self):
completed = set()
class MaxNonBatchJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
batchable = False
def run(self):
completed.add(self.value)
self.has_run = True
def complete(self):
return self.value in completed
tasks = [MaxNonBatchJob((i,)) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertTrue(task.has_run)
def test_run_batch_job_limit_batch_size(self):
completed = set()
runs = []
class CsvLimitedBatchJob(luigi.Task):
value = luigi.parameter.Parameter(batch_method=','.join)
has_run = False
max_batch_size = 4
def run(self):
completed.update(self.value.split(','))
runs.append(self)
def complete(self):
return all(value in completed for value in self.value.split(','))
tasks = [CsvLimitedBatchJob(str(i)) for i in range(11)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertEqual(3, len(runs))
def test_fail_max_batch_job(self):
class MaxBatchFailJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
def run(self):
self.has_run = True
assert False
def complete(self):
return False
tasks = [MaxBatchFailJob(i) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertFalse(self.w.run())
for task in tasks:
# only task number 9 should run
self.assertFalse(task.has_run and task.value < 9)
self.assertEqual({task.task_id for task in tasks}, set(self.sch.task_list('FAILED', '')))
def test_gracefully_handle_batch_method_failure(self):
class BadBatchMethodTask(DummyTask):
priority = 10
batch_int_param = luigi.IntParameter(batch_method=int.__add__) # should be sum
bad_tasks = [BadBatchMethodTask(i) for i in range(5)]
good_tasks = [DummyTask()]
all_tasks = good_tasks + bad_tasks
self.assertFalse(any(task.complete() for task in all_tasks))
worker = Worker(scheduler=Scheduler(retry_count=1), keep_alive=True)
for task in all_tasks:
self.assertTrue(worker.add(task))
self.assertFalse(worker.run())
self.assertFalse(any(task.complete() for task in bad_tasks))
# we only get to run the good task if the bad task failures were handled gracefully
self.assertTrue(all(task.complete() for task in good_tasks))
def test_post_error_message_for_failed_batch_methods(self):
class BadBatchMethodTask(DummyTask):
batch_int_param = luigi.IntParameter(batch_method=int.__add__) # should be sum
tasks = [BadBatchMethodTask(1), BadBatchMethodTask(2)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertFalse(self.w.run())
failed_ids = set(self.sch.task_list('FAILED', ''))
self.assertEqual({task.task_id for task in tasks}, failed_ids)
self.assertTrue(all(self.sch.fetch_error(task_id)['error'] for task_id in failed_ids))
class WorkerKeepAliveTests(LuigiTestCase):
def setUp(self):
self.sch = Scheduler()
super(WorkerKeepAliveTests, self).setUp()
def _worker_keep_alive_test(self, first_should_live, second_should_live, task_status=None, **worker_args):
worker_args.update({
'scheduler': self.sch,
'worker_processes': 0,
'wait_interval': 0.01,
'wait_jitter': 0.0,
})
w1 = Worker(worker_id='w1', **worker_args)
w2 = Worker(worker_id='w2', **worker_args)
with w1 as worker1, w2 as worker2:
worker1.add(DummyTask())
t1 = threading.Thread(target=worker1.run)
t1.start()
worker2.add(DummyTask())
t2 = threading.Thread(target=worker2.run)
t2.start()
if task_status:
self.sch.add_task(worker='DummyWorker', task_id=DummyTask().task_id, status=task_status)
# allow workers to run their get work loops a few times
time.sleep(0.1)
try:
self.assertEqual(first_should_live, t1.is_alive())
self.assertEqual(second_should_live, t2.is_alive())
finally:
# mark the task done so the worker threads will die
self.sch.add_task(worker='DummyWorker', task_id=DummyTask().task_id, status='DONE')
t1.join()
t2.join()
def test_no_keep_alive(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=False,
)
def test_keep_alive(self):
self._worker_keep_alive_test(
first_should_live=True,
second_should_live=True,
keep_alive=True,
)
def test_keep_alive_count_uniques(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=False,
keep_alive=True,
count_uniques=True,
)
def test_keep_alive_count_last_scheduled(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=True,
keep_alive=True,
count_last_scheduled=True,
)
def test_keep_alive_through_failure(self):
self._worker_keep_alive_test(
first_should_live=True,
second_should_live=True,
keep_alive=True,
task_status='FAILED',
)
def test_do_not_keep_alive_through_disable(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=False,
keep_alive=True,
task_status='DISABLED',
)
class WorkerInterruptedTest(unittest.TestCase):
def setUp(self):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
requiring_sigusr = unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
'signal.SIGUSR1 not found on this system')
def _test_stop_getting_new_work(self, worker):
d = DummyTask()
with worker:
            worker.add(d)  # For an assistant it's ok that other tasks add it
self.assertFalse(d.complete())
worker.handle_interrupt(signal.SIGUSR1, None)
worker.run()
self.assertFalse(d.complete())
@requiring_sigusr
def test_stop_getting_new_work(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch))
@requiring_sigusr
def test_stop_getting_new_work_assistant(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch, keep_alive=False, assistant=True))
@requiring_sigusr
def test_stop_getting_new_work_assistant_keep_alive(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch, keep_alive=True, assistant=True))
def test_existence_of_disabling_option(self):
# any code equivalent of `os.kill(os.getpid(), signal.SIGUSR1)`
        # seems to give some sort of an "InvocationError"
Worker(no_install_shutdown_handler=True)
@with_config({"worker": {"no_install_shutdown_handler": "True"}})
def test_can_run_luigi_in_thread(self):
class A(DummyTask):
pass
task = A()
# Note that ``signal.signal(signal.SIGUSR1, fn)`` can only be called in the main thread.
# So if we do not disable the shutdown handler, this would fail.
t = threading.Thread(target=lambda: luigi.build([task], local_scheduler=True))
t.start()
t.join()
self.assertTrue(task.complete())
class WorkerDisabledTest(LuigiTestCase):
def make_sch(self):
return Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
def _test_stop_getting_new_work_build(self, sch, worker):
"""
I got motivated to create this test case when I saw that the
execution_summary crashed after my first attempted solution.
"""
class KillWorkerTask(luigi.Task):
did_actually_run = False
def run(self):
sch.disable_worker('my_worker_id')
KillWorkerTask.did_actually_run = True
class Factory:
def create_local_scheduler(self, *args, **kwargs):
return sch
def create_worker(self, *args, **kwargs):
return worker
luigi.build([KillWorkerTask()], worker_scheduler_factory=Factory(), local_scheduler=True)
self.assertTrue(KillWorkerTask.did_actually_run)
def _test_stop_getting_new_work_manual(self, sch, worker):
d = DummyTask()
with worker:
            worker.add(d)  # For an assistant it's ok that other tasks add it
self.assertFalse(d.complete())
sch.disable_worker('my_worker_id')
worker.run() # Note: Test could fail by hanging on this line
self.assertFalse(d.complete())
def _test_stop_getting_new_work(self, **worker_kwargs):
worker_kwargs['worker_id'] = 'my_worker_id'
sch = self.make_sch()
worker_kwargs['scheduler'] = sch
self._test_stop_getting_new_work_manual(sch, Worker(**worker_kwargs))
sch = self.make_sch()
worker_kwargs['scheduler'] = sch
self._test_stop_getting_new_work_build(sch, Worker(**worker_kwargs))
def test_stop_getting_new_work_keep_alive(self):
self._test_stop_getting_new_work(keep_alive=True, assistant=False)
def test_stop_getting_new_work_assistant(self):
self._test_stop_getting_new_work(keep_alive=False, assistant=True)
def test_stop_getting_new_work_assistant_keep_alive(self):
self._test_stop_getting_new_work(keep_alive=True, assistant=True)
class DynamicDependenciesTest(unittest.TestCase):
n_workers = 1
timeout = float('inf')
def setUp(self):
self.p = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.p)
def test_dynamic_dependencies(self, use_banana_task=False):
t0 = time.time()
t = DynamicRequires(p=self.p, use_banana_task=use_banana_task)
luigi.build([t], local_scheduler=True, workers=self.n_workers)
self.assertTrue(t.complete())
# loop through output and verify
with t.output().open('r') as f:
for i in range(7):
self.assertEqual(f.readline().strip(), '%d: Done!' % i)
self.assertTrue(time.time() - t0 < self.timeout)
def test_dynamic_dependencies_with_namespace(self):
self.test_dynamic_dependencies(use_banana_task=True)
def test_dynamic_dependencies_other_module(self):
t = DynamicRequiresOtherModule(p=self.p)
luigi.build([t], local_scheduler=True, workers=self.n_workers)
self.assertTrue(t.complete())
class DynamicDependenciesWithMultipleWorkersTest(DynamicDependenciesTest):
n_workers = 100
timeout = 3.0 # We run 7 tasks that take 0.5s each so it should take less than 3.5s
class WorkerPingThreadTests(unittest.TestCase):
def test_ping_retry(self):
""" Worker ping fails once. Ping continues to try to connect to scheduler
Kind of ugly since it uses actual timing with sleep to test the thread
"""
sch = Scheduler(
retry_delay=100,
remove_delay=1000,
worker_disconnect_delay=10,
)
self._total_pings = 0 # class var so it can be accessed from fail_ping
def fail_ping(worker):
# this will be called from within keep-alive thread...
self._total_pings += 1
raise Exception("Some random exception")
sch.ping = fail_ping
with Worker(
scheduler=sch,
worker_id="foo",
ping_interval=0.01 # very short between pings to make test fast
):
# let the keep-alive thread run for a bit...
time.sleep(0.1) # yes, this is ugly but it's exactly what we need to test
self.assertTrue(
self._total_pings > 1,
msg="Didn't retry pings (%d pings performed)" % (self._total_pings,)
)
def test_ping_thread_shutdown(self):
with Worker(ping_interval=0.01) as w:
self.assertTrue(w._keep_alive_thread.is_alive())
self.assertFalse(w._keep_alive_thread.is_alive())
def email_patch(test_func, email_config=None):
EMAIL_CONFIG = {"email": {"receiver": "not-a-real-email-address-for-test-only", "force_send": "true"}}
if email_config is not None:
EMAIL_CONFIG.update(email_config)
emails = []
def mock_send_email(sender, recipients, msg):
emails.append(msg)
@with_config(EMAIL_CONFIG)
@functools.wraps(test_func)
@mock.patch('smtplib.SMTP')
def run_test(self, smtp):
smtp().sendmail.side_effect = mock_send_email
test_func(self, emails)
return run_test
def custom_email_patch(config):
return functools.partial(email_patch, email_config=config)
class WorkerEmailTest(LuigiTestCase):
def run(self, result=None):
super(WorkerEmailTest, self).setUp()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as self.worker:
super(WorkerEmailTest, self).run(result)
@email_patch
def test_connection_error(self, emails):
sch = RemoteScheduler('http://tld.invalid:1337', connect_timeout=1)
self.waits = 0
def dummy_wait():
self.waits += 1
sch._wait = dummy_wait
class A(DummyTask):
pass
a = A()
self.assertEqual(emails, [])
with Worker(scheduler=sch) as worker:
try:
worker.add(a)
except RPCError:
self.assertEqual(self.waits, 2) # should attempt to add it 3 times
self.assertNotEqual(emails, [])
self.assertTrue(emails[0].find("Luigi: Framework error while scheduling %s" % (a,)) != -1)
else:
self.fail()
@email_patch
def test_complete_error(self, emails):
class A(DummyTask):
def complete(self):
raise Exception("b0rk")
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.assertFalse(a.has_run)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_complete_error_email_batch(self, emails):
class A(DummyTask):
def complete(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
worker.run()
self.assertEqual(emails, [])
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue("1 scheduling failure" in emails[0])
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_complete_error_email_batch_to_owner(self, emails):
class A(DummyTask):
owner_email = '[email protected]'
def complete(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
worker.run()
self.assertEqual(emails, [])
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue(any(
"1 scheduling failure" in email and '[email protected]' in email
for email in emails))
@email_patch
def test_announce_scheduling_failure_unexpected_error(self, emails):
class A(DummyTask):
owner_email = '[email protected]'
def complete(self):
pass
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
with mock.patch.object(worker._scheduler, 'announce_scheduling_failure', side_effect=Exception('Unexpected')),\
self.assertRaises(Exception):
worker.add(a)
self.assertTrue(len(emails) == 2) # One for `complete` error, one for exception in announcing.
self.assertTrue('Luigi: Framework error while scheduling' in emails[1])
self.assertTrue('[email protected]' in emails[1])
@email_patch
def test_requires_error(self, emails):
class A(DummyTask):
def requires(self):
raise Exception("b0rk")
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertFalse(a.has_run)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_requires_error_email_batch(self, emails):
class A(DummyTask):
def requires(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
worker.run()
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue("1 scheduling failure" in emails[0])
@email_patch
def test_complete_return_value(self, emails):
class A(DummyTask):
def complete(self):
pass # no return value should be an error
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.assertFalse(a.has_run)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_complete_return_value_email_batch(self, emails):
class A(DummyTask):
def complete(self):
pass # no return value should be an error
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
self.worker.run()
self.assertEqual(emails, [])
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue("1 scheduling failure" in emails[0])
@email_patch
def test_run_error(self, emails):
class A(luigi.Task):
def run(self):
raise Exception("b0rk")
a = A()
luigi.build([a], workers=1, local_scheduler=True)
self.assertEqual(1, len(emails))
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_email_batch(self, emails):
class A(luigi.Task):
owner_email = ['[email protected]', '[email protected]']
def run(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
worker.add(A())
worker.run()
scheduler.prune()
self.assertEqual(3, len(emails))
self.assertTrue(any('[email protected]' in email for email in emails))
self.assertTrue(any('[email protected]' in email for email in emails))
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_batch_email_string(self, emails):
class A(luigi.Task):
owner_email = '[email protected]'
def run(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
worker.add(A())
worker.run()
scheduler.prune()
self.assertEqual(2, len(emails))
self.assertTrue(any('[email protected]' in email for email in emails))
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_no_email(self, emails):
class A(luigi.Task):
def run(self):
raise Exception("b0rk")
luigi.build([A()], workers=1, local_scheduler=True)
self.assertFalse(emails)
@staticmethod
def read_email(email_msg):
subject_obj, body_obj = email.parser.Parser().parsestr(email_msg).walk()
return str(subject_obj['Subject']), str(body_obj.get_payload(decode=True))
@email_patch
def test_task_process_dies_with_email(self, emails):
a = SendSignalTask(signal.SIGKILL)
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(1, len(emails))
subject, body = self.read_email(emails[0])
self.assertIn("Luigi: {} FAILED".format(a), subject)
self.assertIn("died unexpectedly with exit code -9", body)
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_task_process_dies_no_email(self, emails):
luigi.build([SendSignalTask(signal.SIGKILL)], workers=2, local_scheduler=True)
self.assertEqual([], emails)
@email_patch
def test_task_times_out(self, emails):
class A(luigi.Task):
worker_timeout = 0.0001
def run(self):
time.sleep(5)
a = A()
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(1, len(emails))
subject, body = self.read_email(emails[0])
self.assertIn("Luigi: %s FAILED" % (a,), subject)
self.assertIn("timed out after 0.0001 seconds and was terminated.", body)
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_task_times_out_no_email(self, emails):
class A(luigi.Task):
worker_timeout = 0.0001
def run(self):
time.sleep(5)
luigi.build([A()], workers=2, local_scheduler=True)
self.assertEqual([], emails)
@with_config(dict(worker=dict(retry_external_tasks='true')))
@email_patch
def test_external_task_retries(self, emails):
"""
Test that we do not send error emails on the failures of external tasks
"""
class A(luigi.ExternalTask):
pass
a = A()
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(emails, [])
@email_patch
def test_no_error(self, emails):
class A(DummyTask):
pass
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertEqual(emails, [])
self.worker.run()
self.assertEqual(emails, [])
self.assertTrue(a.complete())
@custom_email_patch({"email": {"receiver": "not-a-real-email-address-for-test-only", 'format': 'none'}})
def test_disable_emails(self, emails):
class A(luigi.Task):
def complete(self):
raise Exception("b0rk")
self.worker.add(A())
self.assertEqual(emails, [])
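# Helper tasks used by the process-management tests below: RaiseSystemExit raises SystemExit from
# run(), SendSignalTask sends the given signal to its own process, and HangTheWorkerTask loops
# forever (and never completes) so timeout and termination handling can be exercised.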
class RaiseSystemExit(luigi.Task):
def run(self):
raise SystemExit("System exit!!")
class SendSignalTask(luigi.Task):
signal = luigi.IntParameter()
def run(self):
os.kill(os.getpid(), self.signal)
class HangTheWorkerTask(luigi.Task):
worker_timeout = luigi.IntParameter(default=None)
def run(self):
while True:
pass
def complete(self):
return False
class MultipleWorkersTest(unittest.TestCase):
@unittest.skip('Always skip. There are many intermittent failures')
def test_multiple_workers(self):
        # Test using multiple workers.
        # Also test generating classes dynamically, since this may expose issues with how
        # multiprocessing is implemented on various platforms. If it uses os.fork under the
        # hood it should be fine, but dynamic classes can't be pickled, so other
        # multiprocessing implementations (using spawn etc.) may fail.
class MyDynamicTask(luigi.Task):
x = luigi.Parameter()
def run(self):
time.sleep(0.1)
t0 = time.time()
luigi.build([MyDynamicTask(i) for i in range(100)], workers=100, local_scheduler=True)
        self.assertTrue(time.time() < t0 + 5.0)  # should ideally take exactly 0.1s, but definitely less than 5.0
def test_zero_workers(self):
d = DummyTask()
luigi.build([d], workers=0, local_scheduler=True)
self.assertFalse(d.complete())
def test_system_exit(self):
# This would hang indefinitely before this fix:
# https://github.com/spotify/luigi/pull/439
luigi.build([RaiseSystemExit()], workers=2, local_scheduler=True)
def test_term_worker(self):
luigi.build([SendSignalTask(signal.SIGTERM)], workers=2, local_scheduler=True)
def test_kill_worker(self):
luigi.build([SendSignalTask(signal.SIGKILL)], workers=2, local_scheduler=True)
def test_purge_multiple_workers(self):
w = Worker(worker_processes=2, wait_interval=0.01)
t1 = SendSignalTask(signal.SIGTERM)
t2 = SendSignalTask(signal.SIGKILL)
w.add(t1)
w.add(t2)
w._run_task(t1.task_id)
w._run_task(t2.task_id)
time.sleep(1.0)
w._handle_next_task()
w._handle_next_task()
w._handle_next_task()
def test_stop_worker_kills_subprocesses(self):
with Worker(worker_processes=2) as w:
hung_task = HangTheWorkerTask()
w.add(hung_task)
w._run_task(hung_task.task_id)
pids = [p.pid for p in w._running_tasks.values()]
self.assertEqual(1, len(pids))
pid = pids[0]
def is_running():
return pid in {p.pid for p in psutil.Process().children()}
self.assertTrue(is_running())
self.assertFalse(is_running())
@mock.patch('luigi.worker.time')
def test_no_process_leak_from_repeatedly_running_same_task(self, worker_time):
with Worker(worker_processes=2) as w:
hung_task = HangTheWorkerTask()
w.add(hung_task)
w._run_task(hung_task.task_id)
children = set(psutil.Process().children())
# repeatedly try to run the same task id
for _ in range(10):
worker_time.sleep.reset_mock()
w._run_task(hung_task.task_id)
# should sleep after each attempt
worker_time.sleep.assert_called_once_with(mock.ANY)
# only one process should be running
self.assertEqual(children, set(psutil.Process().children()))
def test_time_out_hung_worker(self):
luigi.build([HangTheWorkerTask(0.1)], workers=2, local_scheduler=True)
def test_time_out_hung_single_worker(self):
luigi.build([HangTheWorkerTask(0.1)], workers=1, local_scheduler=True)
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/72953986')
@mock.patch('luigi.worker.time')
def test_purge_hung_worker_default_timeout_time(self, mock_time):
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask()
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 5
w._handle_next_task()
self.assertEqual(1, len(w._running_tasks))
mock_time.time.return_value = 6
w._handle_next_task()
self.assertEqual(0, len(w._running_tasks))
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/76645264')
@mock.patch('luigi.worker.time')
def test_purge_hung_worker_override_timeout_time(self, mock_time):
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask(worker_timeout=10)
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 10
w._handle_next_task()
self.assertEqual(1, len(w._running_tasks))
mock_time.time.return_value = 11
w._handle_next_task()
self.assertEqual(0, len(w._running_tasks))
class Dummy2Task(Task):
p = luigi.Parameter()
def output(self):
return MockTarget(self.p)
def run(self):
f = self.output().open('w')
f.write('test')
f.close()
class AssistantTest(unittest.TestCase):
def run(self, result=None):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
self.assistant = Worker(scheduler=self.sch, worker_id='Y', assistant=True)
with Worker(scheduler=self.sch, worker_id='X') as w:
self.w = w
super(AssistantTest, self).run(result)
def test_get_work(self):
d = Dummy2Task('123')
self.w.add(d)
self.assertFalse(d.complete())
self.assistant.run()
self.assertTrue(d.complete())
def test_bad_job_type(self):
class Dummy3Task(Dummy2Task):
task_family = 'UnknownTaskFamily'
d = Dummy3Task('123')
self.w.add(d)
self.assertFalse(d.complete())
self.assertFalse(self.assistant.run())
self.assertFalse(d.complete())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [d.task_id])
def test_unimported_job_type(self):
MODULE_CONTENTS = b'''
import luigi
class UnimportedTask(luigi.Task):
def complete(self):
return False
'''
reg = luigi.task_register.Register._get_reg()
class UnimportedTask(luigi.Task):
task_module = None # Set it here, so it's generally settable
luigi.task_register.Register._set_reg(reg)
task = UnimportedTask()
# verify that it can't run the task without the module info necessary to import it
self.w.add(task)
self.assertFalse(self.assistant.run())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])
# check that it can import with the right module
with temporary_unloaded_module(MODULE_CONTENTS) as task.task_module:
self.w.add(task)
self.assertTrue(self.assistant.run())
self.assertEqual(list(self.sch.task_list('DONE', '').keys()), [task.task_id])
def test_unimported_job_sends_failure_message(self):
class NotInAssistantTask(luigi.Task):
task_family = 'Unknown'
task_module = None
task = NotInAssistantTask()
self.w.add(task)
self.assertFalse(self.assistant.run())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])
self.assertTrue(self.sch.fetch_error(task.task_id)['error'])
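# ForkBombTask requires copies of itself to build a dependency tree of the given depth and
# breadth; TaskLimitTest below uses it to exercise the worker's task_limit configuration.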
class ForkBombTask(luigi.Task):
depth = luigi.IntParameter()
breadth = luigi.IntParameter()
p = luigi.Parameter(default=(0, )) # ehm for some weird reason [0] becomes a tuple...?
def output(self):
return MockTarget('.'.join(map(str, self.p)))
def run(self):
with self.output().open('w') as f:
f.write('Done!')
def requires(self):
if len(self.p) < self.depth:
for i in range(self.breadth):
yield ForkBombTask(self.depth, self.breadth, self.p + (i, ))
class TaskLimitTest(unittest.TestCase):
def tearDown(self):
MockFileSystem().remove('')
@with_config({'worker': {'task_limit': '6'}})
def test_task_limit_exceeded(self):
w = Worker()
t = ForkBombTask(3, 2)
w.add(t)
w.run()
self.assertFalse(t.complete())
leaf_tasks = [ForkBombTask(3, 2, branch) for branch in [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1)]]
self.assertEqual(3, sum(t.complete() for t in leaf_tasks),
"should have gracefully completed as much as possible even though the single last leaf didn't get scheduled")
@with_config({'worker': {'task_limit': '7'}})
def test_task_limit_not_exceeded(self):
w = Worker()
t = ForkBombTask(3, 2)
w.add(t)
w.run()
self.assertTrue(t.complete())
def test_no_task_limit(self):
w = Worker()
t = ForkBombTask(4, 2)
w.add(t)
w.run()
self.assertTrue(t.complete())
class WorkerConfigurationTest(unittest.TestCase):
def test_asserts_for_worker(self):
"""
Test that Worker() asserts that it's sanely configured
"""
Worker(wait_interval=1) # This shouldn't raise
self.assertRaises(AssertionError, Worker, wait_interval=0)
class WorkerWaitJitterTest(unittest.TestCase):
@with_config({'worker': {'wait_jitter': '10.0'}})
@mock.patch("random.uniform")
@mock.patch("time.sleep")
def test_wait_jitter(self, mock_sleep, mock_random):
""" verify configured jitter amount """
mock_random.return_value = 1.0
w = Worker()
x = w._sleeper()
next(x)
mock_random.assert_called_with(0, 10.0)
mock_sleep.assert_called_with(2.0)
mock_random.return_value = 2.0
next(x)
mock_random.assert_called_with(0, 10.0)
mock_sleep.assert_called_with(3.0)
@mock.patch("random.uniform")
@mock.patch("time.sleep")
def test_wait_jitter_default(self, mock_sleep, mock_random):
""" verify default jitter is as expected """
mock_random.return_value = 1.0
w = Worker()
x = w._sleeper()
next(x)
mock_random.assert_called_with(0, 5.0)
mock_sleep.assert_called_with(2.0)
mock_random.return_value = 3.3
next(x)
mock_random.assert_called_with(0, 5.0)
mock_sleep.assert_called_with(4.3)
class KeyboardInterruptBehaviorTest(LuigiTestCase):
def test_propagation_when_executing(self):
"""
        Ensure that keyboard interrupts cause luigi to quit when you are
executing tasks.
TODO: Add a test that tests the multiprocessing (--worker >1) case
"""
class KeyboardInterruptTask(luigi.Task):
def run(self):
raise KeyboardInterrupt()
cmd = 'KeyboardInterruptTask --local-scheduler --no-lock'.split(' ')
self.assertRaises(KeyboardInterrupt, luigi_run, cmd)
def test_propagation_when_scheduling(self):
"""
Test that KeyboardInterrupt causes luigi to quit while scheduling.
"""
class KeyboardInterruptTask(luigi.Task):
def complete(self):
raise KeyboardInterrupt()
class ExternalKeyboardInterruptTask(luigi.ExternalTask):
def complete(self):
raise KeyboardInterrupt()
self.assertRaises(KeyboardInterrupt, luigi_run,
['KeyboardInterruptTask', '--local-scheduler', '--no-lock'])
self.assertRaises(KeyboardInterrupt, luigi_run,
['ExternalKeyboardInterruptTask', '--local-scheduler', '--no-lock'])
class WorkerPurgeEventHandlerTest(unittest.TestCase):
@mock.patch('luigi.worker.ContextManagedTaskProcess')
def test_process_killed_handler(self, task_proc):
result = []
@HangTheWorkerTask.event_handler(Event.PROCESS_FAILURE)
def store_task(t, error_msg):
self.assertTrue(error_msg)
result.append(t)
w = Worker()
task = HangTheWorkerTask()
task_process = mock.MagicMock(is_alive=lambda: False, exitcode=-14, task=task)
task_proc.return_value = task_process
w.add(task)
w._run_task(task.task_id)
w._handle_next_task()
self.assertEqual(result, [task])
@mock.patch('luigi.worker.time')
def test_timeout_handler(self, mock_time):
result = []
@HangTheWorkerTask.event_handler(Event.TIMEOUT)
def store_task(t, error_msg):
self.assertTrue(error_msg)
result.append(t)
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask(worker_timeout=1)
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 3
w._handle_next_task()
self.assertEqual(result, [task])
@mock.patch('luigi.worker.time')
def test_timeout_handler_single_worker(self, mock_time):
result = []
@HangTheWorkerTask.event_handler(Event.TIMEOUT)
def store_task(t, error_msg):
self.assertTrue(error_msg)
result.append(t)
w = Worker(wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask(worker_timeout=1)
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 3
w._handle_next_task()
self.assertEqual(result, [task])
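# The tests below check that a task-level retry_count overrides the scheduler's default when
# deciding how many failures are allowed before a task becomes DISABLED.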
class PerTaskRetryPolicyBehaviorTest(LuigiTestCase):
def setUp(self):
super(PerTaskRetryPolicyBehaviorTest, self).setUp()
self.per_task_retry_count = 3
self.default_retry_count = 1
self.sch = Scheduler(retry_delay=0.1, retry_count=self.default_retry_count, prune_on_get_work=True)
def test_with_all_disabled_with_single_worker(self):
"""
        This test covers a wrapper task (TestWrapperTask) that requires two other tasks
        (TestErrorTask1, TestErrorTask2), both of which fail.
        TestErrorTask1 uses the scheduler's default retry_count of 1, while TestErrorTask2
        overrides retry_count at the task level with per_task_retry_count.
        This test runs with a single worker.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e2, e1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_all_disabled_with_multiple_worker(self):
"""
        This test covers a wrapper task (TestWrapperTask) that requires two other tasks
        (TestErrorTask1, TestErrorTask2), both of which fail.
        TestErrorTask1 uses the scheduler's default retry_count of 1, while TestErrorTask2
        overrides retry_count at the task level with per_task_retry_count.
        This test runs with multiple workers.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e2, e1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
with Worker(scheduler=self.sch, worker_id='Z', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w3:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(e2))
self.assertTrue(w3.add(e1))
self.assertFalse(w3.run())
self.assertFalse(w2.run())
self.assertTrue(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_includes_success_with_single_worker(self):
"""
        This test covers a wrapper task (TestWrapperTask) that requires one failing task
        (TestErrorTask1) and one successful task (TestSuccessTask1).
        TestSuccessTask1 finishes as DONE, while TestErrorTask1 fails and overrides retry_count
        at the task level with per_task_retry_count.
        This test runs with a single worker.
"""
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestErrorTask1(DummyErrorTask):
retry_count = self.per_task_retry_count
e1 = TestErrorTask1()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e1, s1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual([e1.task_id], list(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual([s1.task_id], list(self.sch.task_list('DONE', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
def test_with_includes_success_with_multiple_worker(self):
"""
        This test covers a wrapper task (TestWrapperTask) that requires one failing task
        (TestErrorTask1) and one successful task (TestSuccessTask1).
        TestSuccessTask1 finishes as DONE, while TestErrorTask1 fails and overrides retry_count
        at the task level with per_task_retry_count.
        This test runs with multiple workers.
"""
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestErrorTask1(DummyErrorTask):
retry_count = self.per_task_retry_count
e1 = TestErrorTask1()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e1, s1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
with Worker(scheduler=self.sch, worker_id='Z', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w3:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(e1))
self.assertTrue(w3.add(s1))
self.assertTrue(w3.run())
self.assertFalse(w2.run())
self.assertTrue(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual([e1.task_id], list(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual([s1.task_id], list(self.sch.task_list('DONE', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
def test_with_dynamic_dependencies_with_single_worker(self):
"""
        This test covers dynamic dependencies (TestErrorTask1, TestErrorTask2) that both fail.
        TestErrorTask1 uses the scheduler's default retry_count of 1, while TestErrorTask2
        overrides retry_count at the task level with per_task_retry_count.
        This test runs with a single worker.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestWrapperTask(DummyTask):
def requires(self):
return [s1]
def run(self):
super(TestWrapperTask, self).run()
yield e2, e1
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_dynamic_dependencies_with_multiple_workers(self):
"""
        This test covers dynamic dependencies (TestErrorTask1, TestErrorTask2) that both fail.
        TestErrorTask1 uses the scheduler's default retry_count of 1, while TestErrorTask2
        overrides retry_count at the task level with per_task_retry_count.
        This test runs with multiple workers.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestWrapperTask(DummyTask):
def requires(self):
return [s1]
def run(self):
super(TestWrapperTask, self).run()
yield e2, e1
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(s1))
self.assertTrue(w2.run())
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
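# Minimal JSON-RPC client for bitcoind: posts version 1.1 requests over HTTP with basic auth.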
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
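# Helpers for reordering the getwork data: 32-bit truncation, per-word byte reversal and
# whole-buffer word reversal.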
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
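# Miner pre-hashes the first 76 bytes of the block header, scans nonces, double-SHA256s each
# candidate and submits any hash that falls below the target.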
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
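# Entry point: parse key=value settings from the config file, fill in defaults and start one
# mining process per configured thread.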
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 38400
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
example01.py
|
import threading
import time
start = time.perf_counter()
def do_something(number):
print(f'Do something {number}')
time.sleep(1)
print(f'Done sleep {number}')
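# Start all ten threads and then join them; because each call mostly sleeps, the total runtime
# stays close to one second rather than ten.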
threads = [threading.Thread(target=do_something, args=(number,))
for number in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
end = time.perf_counter()
print(f'Time {round(end - start, 2)}')
|
guimotomandualcomplete.py
|
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import numpy as np
import sympy as sp
from matplotlib.widgets import Slider,CheckButtons,Button,TextBox,RadioButtons
from robolink import *
import serial
import threading
import time
import sys
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.37,bottom=0.28,right=1,top=1)
ax = plt.axes(projection = "3d")
def mmatrix(*matrices):
    # Multiply any number of 4x4 homogeneous transforms together, left to right.
    r = np.eye(4)
    for m in matrices:
        r = np.dot(r, m)
    return r
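# dibujar: redraw the figure and pause briefly so the 3D plot updates while animating.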
def dibujar():
plt.draw()
plt.pause(0.001)
def sind(t):
res=np.sin(t*np.pi/180)
return res
def cosd(t):
res=np.cos(t*np.pi/180)
return res
def setaxis(lim=2):
x1=-lim
x2=lim
y1=-lim
y2=lim
z1=-lim
z2=lim
ax.set_xlim3d(x1,x2)
ax.set_ylim3d(y1,y2)
ax.set_zlim3d(z1,z2)
ax.view_init(elev=30,azim=40)
ax.grid(True)
def sistemafijo(rango=1):
x=[0,1*rango]
y=[0,1*rango]
z=[0,1*rango]
ax.plot3D(x,[0,0],[0,0],color='red')
ax.plot3D([0,0],y,[0,0],color='green')
ax.plot3D([0,0],[0,0],z,color='blue')
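# rotax/rotay/rotaz: 4x4 homogeneous rotation matrices about the x, y and z axes (degrees); the
# *f variants below rotate an existing frame about a fixed axis while keeping its translation.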
def rotax(t):
Rx=np.array(([1,0,0,0],[0,cosd(t),-sind(t),0],[0,sind(t),cosd(t),0],[0,0,0,1]))
return Rx
def rotay(t):
Ry=np.array(([cosd(t),0,sind(t),0],[0,1,0,0],[-sind(t),0,cosd(t),0],[0,0,0,1]))
return Ry
def rotaz(t):
Rz=np.array(([cosd(t),-sind(t),0,0],[sind(t),cosd(t),0,0],[0,0,1,0],[0,0,0,1]))
return Rz
def rotaxf(t,r):
px=r[0,3]
py=r[1,3]
pz=r[2,3]
Rx=np.array(([1,0,0,0],[0,cosd(t),-sind(t),0],[0,sind(t),cosd(t),0],[0,0,0,1]))
Rx=np.dot(Rx,r)
Rx[0,3]=px
Rx[1,3]=py
Rx[2,3]=pz
return Rx
def rotayf(t,r):
px=r[0,3]
py=r[1,3]
pz=r[2,3]
Ry=np.array(([cosd(t),0,sind(t),0],[0,1,0,0],[-sind(t),0,cosd(t),0],[0,0,0,1]))
Ry=np.dot(Ry,r)
Ry[0,3]=px
Ry[1,3]=py
Ry[2,3]=pz
return Ry
def rotazf(t,r):
px=r[0,3]
py=r[1,3]
pz=r[2,3]
Rz=np.array(([cosd(t),-sind(t),0,0],[sind(t),cosd(t),0,0],[0,0,1,0],[0,0,0,1]))
Rz=np.dot(Rz,r)
Rz[0,3]=px
Rz[1,3]=py
Rz[2,3]=pz
return Rz
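# trasx/trasy/trasz: 4x4 homogeneous translation matrices along x, y and z.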
def trasx(Dx):
Tx=np.array(([[1,0,0,Dx],[0,1,0,0],[0,0,1,0],[0,0,0,1]]))
return Tx
def trasy(Dy):
Ty=np.array(([[1,0,0,0],[0,1,0,Dy],[0,0,1,0],[0,0,0,1]]))
return Ty
def trasz(Dz):
Tz=np.array(([[1,0,0,0],[0,1,0,0],[0,0,1,Dz],[0,0,0,1]]))
return Tz
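# ur5movej: joint-interpolated motion; steps each joint linearly from p1 to p2 in 30 increments,
# redrawing the simulated UR5 and mirroring every step to the RoboDK robot via robot.MoveJ.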
def ur5movej(p1,p2):
n=1
tetar=[p1[0],p1[1],p1[2],p1[3],p1[4],p1[5]]
paso=[0,0,0,0,0,0]
paso[0]=(p2[0]-p1[0])/30
paso[1]=(p2[1]-p1[1])/30
paso[2]=(p2[2]-p1[2])/30
paso[3]=(p2[3]-p1[3])/30
paso[4]=(p2[4]-p1[4])/30
paso[5]=(p2[5]-p1[5])/30
while n<31:
tetar[0]=tetar[0]+paso[0]
tetar[1]=tetar[1]+paso[1]
tetar[2]=tetar[2]+paso[2]
tetar[3]=tetar[3]+paso[3]
tetar[4]=tetar[4]+paso[4]
tetar[5]=tetar[5]+paso[5]
n=n+1
ax.cla()
setaxis(1000)
print(tetar)
ur5(tetar[0],tetar[1],tetar[2],tetar[3],tetar[4],tetar[5])
dibujar()
valores1=tetar.copy()
valores1[1]=valores1[1]-90
valores1[2]=valores1[2]-90
valores1[4]=valores1[4]+90
valores1[4]=valores1[4]%360
robot.MoveJ(valores1)
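# ur5movel: straight-line Cartesian motion; interpolates position and Euler-angle orientation
# between the two poses and solves each intermediate pose numerically with ur5newton.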
def ur5movel(p1,p2,sem):
con=0
n=0
pn=p1
p1x=p1[0,3]
p1y=p1[1,3]
p1z=p1[2,3]
p2x=p2[0,3]
p2y=p2[1,3]
p2z=p2[2,3]
dx=p2x-p1x
dy=p2y-p1y
dz=p2z-p1z
p1ea=mrot2eangle(p1)
angrot=obtangrot(p2,p1ea)
angrot[0]=angrot[0]/30
angrot[1]=angrot[1]/30
angrot[2]=angrot[2]/30
angroti=[0,0,0]
while n<1:
pn=p1@rotaz(angroti[2])@rotay(angroti[1])@rotax(angroti[0])
angroti[0]=angroti[0]+angrot[0]
angroti[1]=angroti[1]+angrot[1]
angroti[2]=angroti[2]+angrot[2]
x=p1x+dx*n
y=p1y+dy*n
z=p1z+dz*n
n+=1/30
con+=1
pn[0,3]=x
pn[1,3]=y
pn[2,3]=z
print(pn)
print(x)
print(y)
print(z)
tetas=ur5newton(pn,sem)
sem=tetas.copy()
ax.cla()
setaxis(1000)
ur5(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
valores1=tetas.copy()
valores1[1]=valores1[1]-90
valores1[2]=valores1[2]-90
valores1[4]=valores1[4]+90
valores1[4]=valores1[4]%360
robot.MoveJ(valores1)
dibujar()
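# ur5movec: circular motion; intersects a sphere and a plane with sympy to obtain the arc through
# the two poses, then tracks the sampled points with the numerical inverse kinematics.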
def ur5movec(p1,p2,sem):
print(p1)
print(p2)
x,y,z=sp.symbols('x,y,z')
con=0
n=0
pn=p1
x1=p1[0,3]
y1=p1[1,3]
z1=p1[2,3]
x2=p2[0,3]
y2=p2[1,3]
z2=p2[2,3]
p1x=p1[0,3]
p1y=p1[1,3]
p1z=p1[2,3]
p2x=p2[0,3]
p2y=p2[1,3]
p2z=p2[2,3]
dx=x2-x1
dy=y2-y1
dz=z2-z1
xm=x1+dx*0.5
ym=y1+dy*0.5
zm=z1+dz*0.5
r=np.sqrt((x1-xm)**2+(y1-ym)**2+(z1-zm)**2)
esfera=(x-xm)**2+(y-ym)**2+(z-zm)**2-r**2
nd=[dy,-dx,0]
plano=nd[0]*(x-x1)+nd[1]*(y-y1)+nd[2]*(z-z1)
cir=sp.nonlinsolve([esfera,plano],[x,y,z])
var=cir.free_symbols
cirnp1=sp.lambdify(var, cir.args[0], "numpy")
cirnp2=sp.lambdify(var, cir.args[1], "numpy")
p1ea=mrot2eangle(p1)
angrot=obtangrot(p2,p1ea)
angrot[0]=angrot[0]/50
angrot[1]=angrot[1]/50
angrot[2]=angrot[2]/50
angroti=[0,0,0]
if np.around(cirnp1(z1)[0],4)== x1:
rango1=np.arange(start=np.around(zm+r,4), stop=z2, step=-(zm+r-z2)/50)
puntos1=cirnp2(rango1)
rango2=np.arange(start=z1, stop=zm+r, step=(zm+r-z1)/50)
puntos2=cirnp1(rango2)
print(puntos1)
print(puntos2)
while n<puntos1[0].size:
pn=p1@rotaz(angroti[2])@rotay(angroti[1])@rotax(angroti[0])
angroti[0]=angroti[0]+angrot[0]
angroti[1]=angroti[1]+angrot[1]
angroti[2]=angroti[2]+angrot[2]
px=puntos2[0][n]
py=puntos2[1][n]
pz=puntos2[2][n]
n=n+1
pn[0,3]=px
pn[1,3]=py
pn[2,3]=pz
tetas=ur5newton(pn,sem)
sem=tetas.copy()
ax.cla()
setaxis(1000)
ur5(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
valores1=tetas.copy()
valores1[1]=valores1[1]-90
valores1[2]=valores1[2]-90
valores1[4]=valores1[4]+90
valores1[4]=valores1[4]%360
robot.MoveJ(valores1)
dibujar()
n=0
while n<puntos2[0].size:
pn=p1@rotaz(angroti[2])@rotay(angroti[1])@rotax(angroti[0])
angroti[0]=angroti[0]+angrot[0]
angroti[1]=angroti[1]+angrot[1]
angroti[2]=angroti[2]+angrot[2]
px=puntos1[0][n]
py=puntos1[1][n]
pz=puntos1[2][n]
n=n+1
pn[0,3]=px
pn[1,3]=py
pn[2,3]=pz
tetas=ur5newton(pn,sem)
sem=tetas.copy()
ax.cla()
setaxis(1000)
ur5(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
valores1=tetas.copy()
valores1[1]=valores1[1]-90
valores1[2]=valores1[2]-90
valores1[4]=valores1[4]+90
valores1[4]=valores1[4]%360
robot.MoveJ(valores1)
dibujar()
else:
rango1=np.arange(start=zm+r, stop=z2, step=-(zm+r-z2)/50)
puntos1=cirnp1(rango1)
rango2=np.arange(start=z1, stop=zm+r, step=(zm+r-z1)/50)
puntos2=cirnp2(rango2)
while n<puntos1[0].size:
pn=p1@rotaz(angroti[2])@rotay(angroti[1])@rotax(angroti[0])
angroti[0]=angroti[0]+angrot[0]
angroti[1]=angroti[1]+angrot[1]
angroti[2]=angroti[2]+angrot[2]
px=puntos2[0][n]
py=puntos2[1][n]
pz=puntos2[2][n]
n=n+1
pn[0,3]=px
pn[1,3]=py
pn[2,3]=pz
tetas=ur5newton(pn,sem)
sem=tetas.copy()
ax.cla()
setaxis(1000)
ur5(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
valores1=tetas.copy()
valores1[1]=valores1[1]-90
valores1[2]=valores1[2]-90
valores1[4]=valores1[4]+90
valores1[4]=valores1[4]%360
robot.MoveJ(valores1)
dibujar()
n=0
while n<puntos2[0].size:
pn=p1@rotaz(angroti[2])@rotay(angroti[1])@rotax(angroti[0])
angroti[0]=angroti[0]+angrot[0]
angroti[1]=angroti[1]+angrot[1]
angroti[2]=angroti[2]+angrot[2]
px=puntos1[0][n]
py=puntos1[1][n]
pz=puntos1[2][n]
n=n+1
pn[0,3]=px
pn[1,3]=py
pn[2,3]=pz
tetas=ur5newton(pn,sem)
sem=tetas.copy()
ax.cla()
setaxis(1000)
ur5(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
valores1=tetas.copy()
valores1[1]=valores1[1]-90
valores1[2]=valores1[2]-90
valores1[4]=valores1[4]+90
valores1[4]=valores1[4]%360
robot.MoveJ(valores1)
dibujar()
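# mrot2eangle: recover the Euler angles (degrees) from the rotation part of a homogeneous
# transform, with special handling of the gimbal-lock case r[0,2] == +/-1.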
def mrot2eangle(r):
teta=[0,0,0]
if r[0,2]==1 or r[0,2]==-1:
teta[0]=0
teta[1]=r[0,2]*90
teta[2]=np.degrees(np.arctan2(r[1,0],r[1,1]))
else:
teta[0]=np.degrees(np.arctan2(-r[1,2],r[2,2]))
teta[1]=np.degrees(np.arcsin(r[0,2]))
teta[2]=np.degrees(np.arctan2(-r[0,1],r[0,0]))
return teta
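# minv: analytic inverse of a homogeneous transform (transpose the rotation block and apply the
# negated, rotated translation).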
def minv(R):
r=np.zeros((4,4))
a=np.zeros((3,3))
p=np.zeros((3,1))
a[0,0]=R[0,0]
a[0,1]=R[0,1]
a[0,2]=R[0,2]
a[1,0]=R[1,0]
a[1,1]=R[1,1]
a[1,2]=R[1,2]
a[2,0]=R[2,0]
a[2,1]=R[2,1]
a[2,2]=R[2,2]
a=np.transpose(a)
r[0,0]=a[0,0]
r[0,1]=a[0,1]
r[0,2]=a[0,2]
r[1,0]=a[1,0]
r[1,1]=a[1,1]
r[1,2]=a[1,2]
r[2,0]=a[2,0]
r[2,1]=a[2,1]
r[2,2]=a[2,2]
a=-1*a
p[0,0]=R[0,3]
p[1,0]=R[1,3]
p[2,0]=R[2,3]
p1=np.dot(a,p)
r[0,3]=p1[0,0]
r[1,3]=p1[1,0]
r[2,3]=p1[2,0]
r[3,3]=1
return r
def sistemamovil(r,rango=1):
ux=r[0,0]
uy=r[1,0]
uz=r[2,0]
vx=r[0,1]
vy=r[1,1]
vz=r[2,1]
wx=r[0,2]
wy=r[1,2]
wz=r[2,2]
px=r[0,3]
py=r[1,3]
pz=r[2,3]
    ax.plot3D([px,px+ux*rango],[py,py+uy*rango],[pz,pz+uz*rango],color='red')  # Draw moving u axis
    ax.plot3D([px,px+vx*rango],[py,py+vy*rango],[pz,pz+vz*rango],color='green')  # Draw moving v axis
    ax.plot3D([px,px+wx*rango],[py,py+wy*rango],[pz,pz+wz*rango],color='blue')  # Draw moving w axis
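# Forward-kinematics drawing routines for several arms (PPP, RPP, RRP, RRR, SCARA, Cobra s800,
# UR5 and the dual-arm Motoman): each builds the chain of homogeneous transforms and plots the
# links and intermediate frames.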
def ppp(d1,d2,d3):
t0=np.eye(4)
t01=trasz(d1)@rotax(-90)
t12=trasz(d2)@rotax(-90)@rotay(90)
t23=trasz(d3)@rotaz(180)
t02=t01@t12
t03=t02@t23
sistemafijo()
sistemamovil(t01)
sistemamovil(t02)
sistemamovil(t03)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
def rpp(t1,d2,d3):
t0=np.eye(4)
t01=rotaz(t1)
t12=trasz(d2)
t23=rotay(90)@trasz(d3)
t02=t01@t12
t03=t02@t23
sistemafijo()
sistemamovil(t01)
sistemamovil(t02)
sistemamovil(t03)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
def rrp(t1,t2,d3):
t0=np.eye(4)
t01=rotaz(t1)
t12=trasz(5)@rotay(90)@rotaz(90)@rotaz(t2)
t23=rotay(90)@rotaz(-90)@trasz(d3)
t02=t01@t12
t03=t02@t23
sistemafijo()
sistemamovil(t01)
sistemamovil(t02)
sistemamovil(t03)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
def rrr(t1,t2,t3):
t0=np.eye(4)
t01=rotaz(t1)
t12=trasz(4)@rotax(90)@rotaz(t2)
t23=trasx(4)@rotaz(t3)
t34=trasx(4)@rotay(90)@rotaz(-90)
t02=t01@t12
t03=t02@t23
t04=t03@t34
sistemafijo()
sistemamovil(t01)
sistemamovil(t02)
sistemamovil(t03)
sistemamovil(t04)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
ax.plot3D([t03[0,3],t04[0,3]],[t03[1,3],t04[1,3]],[t03[2,3],t04[2,3]],color='red')
def scara(t1,t2,d3,t4):
t0=np.eye(4)
t01=rotaz(t1)@trasz(4)
t12=trasx(4)
t23=rotaz(t2)@trasz(-1)
t34=trasx(4)@rotax(180)@rotaz(-90)
t45=trasz(d3)
t56=rotaz(t4)@trasz(1)
t02=t01@t12
t03=t02@t23
t04=t03@t34
t05=t04@t45
t06=t05@t56
sistemafijo()
sistemamovil(t01)
sistemamovil(t02)
sistemamovil(t03)
sistemamovil(t04)
sistemamovil(t05)
sistemamovil(t06)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
ax.plot3D([t03[0,3],t04[0,3]],[t03[1,3],t04[1,3]],[t03[2,3],t04[2,3]],color='red')
ax.plot3D([t04[0,3],t05[0,3]],[t04[1,3],t05[1,3]],[t04[2,3],t05[2,3]],color='red')
ax.plot3D([t05[0,3],t06[0,3]],[t05[1,3],t06[1,3]],[t05[2,3],t06[2,3]],color='red')
def cobras800(t1,t2,d3,t4):
t0=np.eye(4)
t01=rotaz(t1)@trasz(342)
t12=trasx(425)
t23=rotaz(t2)@trasz(56)
t34=trasx(375)
t45=trasz(-210)@trasz(d3)
t56=rotax(180)@rotaz(-180)@rotaz(t4)
t02=t01@t12
t03=t02@t23
t04=t03@t34
t05=t04@t45
t06=t05@t56
sistemafijo(100)
#sistemamovil(t01,100)
#sistemamovil(t02,100)
sistemamovil(t03,100)
#sistemamovil(t04,100)
#sistemamovil(t05,100)
sistemamovil(t06,100)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
ax.plot3D([t03[0,3],t04[0,3]],[t03[1,3],t04[1,3]],[t03[2,3],t04[2,3]],color='red')
ax.plot3D([t04[0,3],t05[0,3]],[t04[1,3],t05[1,3]],[t04[2,3],t05[2,3]],color='red')
ax.plot3D([t05[0,3],t06[0,3]],[t05[1,3],t06[1,3]],[t05[2,3],t06[2,3]],color='red')
def ur5(t1,t2,t3,t4,t5,t6,mb=np.eye(4)):
t0=np.eye(4)@mb
t01=mb@rotaz(t1)@trasz(89.2)#
t12=trasy(-134.2)@rotax(90)@rotaz(t2)#
t23=trasy(425)
t34=trasz(-118.45)@rotaz(t3)#
t45=trasx(392.25)@rotaz(t4)#
t56=trasz(94.75)@rotax(-90)@rotaz(t5)#
t67=trasz(94.75)
t78=trasx(82.5)@rotay(90)@rotaz(-90)@rotaz(t6)#
t02=t01@t12
t03=t02@t23
t04=t03@t34
t05=t04@t45
t06=t05@t56
t07=t06@t67
t08=t07@t78
print(t08)
sistemamovil(t0,100)
#sistemamovil(t01,100)
sistemamovil(t02,100)
sistemamovil(t03,100)
#sistemamovil(t04,100)
sistemamovil(t05,100)
sistemamovil(t06,100)
sistemamovil(t07,100)
sistemamovil(t08,100)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
ax.plot3D([t03[0,3],t04[0,3]],[t03[1,3],t04[1,3]],[t03[2,3],t04[2,3]],color='red')
ax.plot3D([t04[0,3],t05[0,3]],[t04[1,3],t05[1,3]],[t04[2,3],t05[2,3]],color='red')
ax.plot3D([t05[0,3],t06[0,3]],[t05[1,3],t06[1,3]],[t05[2,3],t06[2,3]],color='red')
ax.plot3D([t06[0,3],t07[0,3]],[t06[1,3],t07[1,3]],[t06[2,3],t07[2,3]],color='red')
ax.plot3D([t07[0,3],t08[0,3]],[t07[1,3],t08[1,3]],[t07[2,3],t08[2,3]],color='red')
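# motoman draws the dual-arm Motoman kinematic chain frame by frame; accmotoman repeats it with
# different link dimensions and prints both end-effector poses, while accmotomanv computes the
# same chain without drawing and returns only the two end-effector transforms.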
def motoman(tb,t1a,t2a,t3a,t4a,t5a,t6a,t7a,t1b,t2b,t3b,t4b,t5b,t6b,t7b):
T0=np.eye(4)
Ti=rotaz(tb)
Ti1=Ti@trasz(8)
Ti2=Ti1@trasx(1.57)
Ti3=Ti2@trasy(2.5)@rotax(-90)
Tib3=Ti2@trasy(-2.5)@rotax(90)
T01=Ti3@rotaz(270)@rotaz(t1a)@trasz(.3);
T12=trasx(-1.09)
T23=trasz(2.5)
T34=trasx(1.09)@rotay(90)@rotaz(t2a)
T45=rotay(-90)@trasx(1.2)
T56=trasz(1)
T67=trasx(-1.2)
T78=trasz(0.45)@rotaz(t3a)
T89=trasz(2.225)
T910=trasx(1.04)
T1011=trasz(1.225)
T1112=trasx(-1.04)@rotay(90)@rotaz(t4a)
T1213=rotay(-90)@trasx(-0.98)
T1314=trasz(1.4)
T1415=trasx(0.98)
T1516=trasz(0.7)@rotaz(t5a)
T1617=trasz(0.7)
T1718=trasx(-0.86)
T1819=trasz(1.4)
T1920=trasx(0.86)@rotay(90)@rotaz(t6a)
T2021=rotay(-90)@trasx(0.8)
T2122=trasz(0.9)
T2223=trasx(-0.8)
T2324=trasz(0.9)@rotaz(90)@rotaz(t7a)
T02=T01@T12
T03=T02@T23
T04=T03@T34
T05=T04@T45
T06=T05@T56
T07=T06@T67
T08=T07@T78
T09=T08@T89
T10=T09@T910
T11=T10@T1011
T12=T11@T1112
T13=T12@T1213
T14=T13@T1314
T15=T14@T1415
T16=T15@T1516
T17=T16@T1617
T18=T17@T1718
T19=T18@T1819
T20=T19@T1920
T21=T20@T2021
T22=T21@T2122
T23=T22@T2223
T24=T23@T2324
print(T24)
Tb01=Tib3@rotaz(270)@rotaz(t1b)@trasz(0.3)
Tb12=trasx(-1.09)
Tb23=trasz(2.5)
TB34=trasx(1.09)@rotay(90)@rotaz(t2b)
Tb45=rotay(-90)@trasx(1.2)
Tb56=trasz(1)
Tb67=trasx(-1.2)
Tb78=trasz(0.45)@rotaz(t3b)
Tb89=trasz(2.225)
TB910=trasx(1.04)
Tb1011=trasz(1.225)
Tb1112=trasx(-1.04)@rotay(90)@rotaz(t4b)
Tb1213=rotay(-90)@trasx(-0.98)
Tb1314=trasz(1.4)
Tb1415=trasx(0.98)
Tb1516=trasz(0.7)@rotaz(t5b)
Tb1617=trasz(0.7)
Tb1718=trasx(-0.86)
Tb1819=trasz(1.4)
Tb1920=trasx(0.86)@rotay(90)@rotaz(t6b)
Tb2021=rotay(-90)@trasx(0.8)
Tb2122=trasz(0.9)
Tb2223=trasx(-0.8)
Tb2324=trasz(0.9)@rotaz(90)@rotaz(t7b)
Tb02=Tb01@Tb12
Tb03=Tb02@Tb23
Tb04=Tb03@TB34
Tb05=Tb04@Tb45
Tb06=Tb05@Tb56
Tb07=Tb06@Tb67
Tb08=Tb07@Tb78
Tb09=Tb08@Tb89
Tb10=Tb09@TB910
Tb11=Tb10@Tb1011
Tb12=Tb11@Tb1112
Tb13=Tb12@Tb1213
Tb14=Tb13@Tb1314
Tb15=Tb14@Tb1415
Tb16=Tb15@Tb1516
Tb17=Tb16@Tb1617
Tb18=Tb17@Tb1718
Tb19=Tb18@Tb1819
Tb20=Tb19@Tb1920
Tb21=Tb20@Tb2021
Tb22=Tb21@Tb2122
Tb23=Tb22@Tb2223
Tb24=Tb23@Tb2324
sistemafijo()
sistemamovil(T0)
sistemamovil(T01)
sistemamovil(T04)
sistemamovil(T08)
sistemamovil(T12)
sistemamovil(T16)
sistemamovil(T20)
sistemamovil(T24)
sistemamovil(T0)
sistemamovil(Tb01)
sistemamovil(Tb04)
sistemamovil(Tb08)
sistemamovil(Tb12)
sistemamovil(Tb16)
sistemamovil(Tb20)
sistemamovil(Tb24)
ax.plot3D([Ti[0,3], Ti1[0,3]], [Ti[1,3], Ti1[1,3]], [Ti[2,3], Ti1[2,3]],color='red')
ax.plot3D([Ti1[0,3], Ti2[0,3]], [Ti1[1,3], Ti2[1,3]], [Ti1[2,3], Ti2[2,3]],color='red')
ax.plot3D([Ti2[0,3], Ti3[0,3]], [Ti2[1,3], Ti3[1,3]], [Ti2[2,3], Ti3[2,3]],color='red')
ax.plot3D([Ti3[0,3], T01[0,3]], [Ti3[1,3], T01[1,3]], [Ti3[2,3], T01[2,3]],color='red')
ax.plot3D([T01[0,3], T02[0,3]], [T01[1,3], T02[1,3]], [T01[2,3], T02[2,3]],color='red')
ax.plot3D([T02[0,3], T03[0,3]], [T02[1,3], T03[1,3]], [T02[2,3], T03[2,3]],color='red')
ax.plot3D([T03[0,3], T04[0,3]], [T03[1,3], T04[1,3]], [T03[2,3], T04[2,3]],color='red')
ax.plot3D([T04[0,3], T05[0,3]], [T04[1,3], T05[1,3]], [T04[2,3], T05[2,3]],color='red')
ax.plot3D([T05[0,3], T06[0,3]], [T05[1,3], T06[1,3]], [T05[2,3], T06[2,3]],color='red')
ax.plot3D([T06[0,3], T07[0,3]], [T06[1,3], T07[1,3]], [T06[2,3], T07[2,3]],color='red')
ax.plot3D([T07[0,3], T08[0,3]], [T07[1,3], T08[1,3]], [T07[2,3], T08[2,3]],color='red')
ax.plot3D([T08[0,3], T09[0,3]], [T08[1,3], T09[1,3]], [T08[2,3], T09[2,3]],color='red')
ax.plot3D([T09[0,3], T10[0,3]], [T09[1,3], T10[1,3]], [T09[2,3], T10[2,3]],color='red')
ax.plot3D([T10[0,3], T11[0,3]], [T10[1,3], T11[1,3]], [T10[2,3], T11[2,3]],color='red')
ax.plot3D([T11[0,3], T12[0,3]], [T11[1,3], T12[1,3]], [T11[2,3], T12[2,3]],color='red')
ax.plot3D([T12[0,3], T13[0,3]], [T12[1,3], T13[1,3]], [T12[2,3], T13[2,3]],color='red')
ax.plot3D([T13[0,3], T14[0,3]], [T13[1,3], T14[1,3]], [T13[2,3], T14[2,3]],color='red')
ax.plot3D([T14[0,3], T15[0,3]], [T14[1,3], T15[1,3]], [T14[2,3], T15[2,3]],color='red')
ax.plot3D([T15[0,3], T16[0,3]], [T15[1,3], T16[1,3]], [T15[2,3], T16[2,3]],color='red')
ax.plot3D([T16[0,3], T17[0,3]], [T16[1,3], T17[1,3]], [T16[2,3], T17[2,3]],color='red')
ax.plot3D([T17[0,3], T18[0,3]], [T17[1,3], T18[1,3]], [T17[2,3], T18[2,3]],color='red')
ax.plot3D([T18[0,3], T19[0,3]], [T18[1,3], T19[1,3]], [T18[2,3], T19[2,3]],color='red')
ax.plot3D([T19[0,3], T20[0,3]], [T19[1,3], T20[1,3]], [T19[2,3], T20[2,3]],color='red')
ax.plot3D([T20[0,3], T21[0,3]], [T20[1,3], T21[1,3]], [T20[2,3], T21[2,3]],color='red')
ax.plot3D([T21[0,3], T22[0,3]], [T21[1,3], T22[1,3]], [T21[2,3], T22[2,3]],color='red')
ax.plot3D([T22[0,3], T23[0,3]], [T22[1,3], T23[1,3]], [T22[2,3], T23[2,3]],color='red')
ax.plot3D([T23[0,3], T24[0,3]], [T23[1,3], T24[1,3]], [T23[2,3], T24[2,3]],color='red')
ax.plot3D([Ti[0,3], Ti1[0,3]], [Ti[1,3], Ti1[1,3]], [Ti[2,3], Ti1[2,3]],color='red')
ax.plot3D([Ti1[0,3], Ti2[0,3]], [Ti1[1,3], Ti2[1,3]], [Ti1[2,3], Ti2[2,3]],color='red')
ax.plot3D([Ti2[0,3], Tib3[0,3]], [Ti2[1,3], Tib3[1,3]], [Ti2[2,3], Tib3[2,3]],color='red')
ax.plot3D([Tib3[0,3], Tb01[0,3]], [Tib3[1,3], Tb01[1,3]], [Tib3[2,3], Tb01[2,3]],color='red')
ax.plot3D([Tb01[0,3], Tb02[0,3]], [Tb01[1,3], Tb02[1,3]], [Tb01[2,3], Tb02[2,3]],color='red')
ax.plot3D([Tb02[0,3], Tb03[0,3]], [Tb02[1,3], Tb03[1,3]], [Tb02[2,3], Tb03[2,3]],color='red')
ax.plot3D([Tb03[0,3], Tb04[0,3]], [Tb03[1,3], Tb04[1,3]], [Tb03[2,3], Tb04[2,3]],color='red')
ax.plot3D([Tb04[0,3], Tb05[0,3]], [Tb04[1,3], Tb05[1,3]], [Tb04[2,3], Tb05[2,3]],color='red')
ax.plot3D([Tb05[0,3], Tb06[0,3]], [Tb05[1,3], Tb06[1,3]], [Tb05[2,3], Tb06[2,3]],color='red')
ax.plot3D([Tb06[0,3], Tb07[0,3]], [Tb06[1,3], Tb07[1,3]], [Tb06[2,3], Tb07[2,3]],color='red')
ax.plot3D([Tb07[0,3], Tb08[0,3]], [Tb07[1,3], Tb08[1,3]], [Tb07[2,3], Tb08[2,3]],color='red')
ax.plot3D([Tb08[0,3], Tb09[0,3]], [Tb08[1,3], Tb09[1,3]], [Tb08[2,3], Tb09[2,3]],color='red')
ax.plot3D([Tb09[0,3], Tb10[0,3]], [Tb09[1,3], Tb10[1,3]], [Tb09[2,3], Tb10[2,3]],color='red')
ax.plot3D([Tb10[0,3], Tb11[0,3]], [Tb10[1,3], Tb11[1,3]], [Tb10[2,3], Tb11[2,3]],color='red')
ax.plot3D([Tb11[0,3], Tb12[0,3]], [Tb11[1,3], Tb12[1,3]], [Tb11[2,3], Tb12[2,3]],color='red')
ax.plot3D([Tb12[0,3], Tb13[0,3]], [Tb12[1,3], Tb13[1,3]], [Tb12[2,3], Tb13[2,3]],color='red')
ax.plot3D([Tb13[0,3], Tb14[0,3]], [Tb13[1,3], Tb14[1,3]], [Tb13[2,3], Tb14[2,3]],color='red')
ax.plot3D([Tb14[0,3], Tb15[0,3]], [Tb14[1,3], Tb15[1,3]], [Tb14[2,3], Tb15[2,3]],color='red')
ax.plot3D([Tb15[0,3], Tb16[0,3]], [Tb15[1,3], Tb16[1,3]], [Tb15[2,3], Tb16[2,3]],color='red')
ax.plot3D([Tb16[0,3], Tb17[0,3]], [Tb16[1,3], Tb17[1,3]], [Tb16[2,3], Tb17[2,3]],color='red')
ax.plot3D([Tb17[0,3], Tb18[0,3]], [Tb17[1,3], Tb18[1,3]], [Tb17[2,3], Tb18[2,3]],color='red')
ax.plot3D([Tb18[0,3], Tb19[0,3]], [Tb18[1,3], Tb19[1,3]], [Tb18[2,3], Tb19[2,3]],color='red')
ax.plot3D([Tb19[0,3], Tb20[0,3]], [Tb19[1,3], Tb20[1,3]], [Tb19[2,3], Tb20[2,3]],color='red')
ax.plot3D([Tb20[0,3], Tb21[0,3]], [Tb20[1,3], Tb21[1,3]], [Tb20[2,3], Tb21[2,3]],color='red')
ax.plot3D([Tb21[0,3], Tb22[0,3]], [Tb21[1,3], Tb22[1,3]], [Tb21[2,3], Tb22[2,3]],color='red')
ax.plot3D([Tb22[0,3], Tb23[0,3]], [Tb22[1,3], Tb23[1,3]], [Tb22[2,3], Tb23[2,3]],color='red')
ax.plot3D([Tb23[0,3], Tb24[0,3]], [Tb23[1,3], Tb24[1,3]], [Tb23[2,3], Tb24[2,3]],color='red')
def accmotoman(tb,t1a,t2a,t3a,t4a,t5a,t6a,t7a,t1b,t2b,t3b,t4b,t5b,t6b,t7b):
T0=np.eye(4)
Ti=trasz(893.5)
Ti1=Ti@trasx(92.5)@rotaz(tb)
Ti2=Ti1@trasx(100)@trasz(306.5)
Ti3=Ti2@rotax(-90)@rotaz(-180)@rotaz(t1a)@trasz(265)
Tib3=Ti2@rotax(90)@rotaz(-180)@rotaz(t1b)@trasz(265)
T01=Ti3@rotax(-90)@rotaz(t2a)
T12=trasz(-80)
T23=trasy(-90)
T34=trasz(80)
T45=trasy(-90)@rotax(90)@rotaz(t3a)
T56=trasz(90)
T67=trasy(-80)
T78=trasz(90)
T89=trasy(80)@rotax(-90)@rotaz(t4a)
T910=trasz(80)
T1011=trasy(-90)
T1112=trasz(-80)
T1213=trasy(-90)@rotax(90)@rotaz(t5a)
T1314=trasz(90)
T1415=trasy(80)
T1516=trasz(90)
T1617=trasy(-80)@rotax(-90)@rotaz(t6a)
T1718=trasz(-80)
T1819=trasy(-87.5)
T1920=trasz(80)
T2021=trasy(-87.5)@rotax(90)@rotaz(t7a)
T02=T01@T12
T03=T02@T23
T04=T03@T34
T05=T04@T45
T06=T05@T56
T07=T06@T67
T08=T07@T78
T09=T08@T89
T10=T09@T910
T11=T10@T1011
T12=T11@T1112
T13=T12@T1213
T14=T13@T1314
T15=T14@T1415
T16=T15@T1516
T17=T16@T1617
T18=T17@T1718
T19=T18@T1819
T20=T19@T1920
T21=T20@T2021
print("derecho ++++++")
print(T21)
Tb01=Tib3@rotax(-90)@rotaz(t2b)
Tb12=trasz(-80)
Tb23=trasy(-90)
Tb34=trasz(80)
Tb45=trasy(-90)@rotax(90)@rotaz(t3b)
Tb56=trasz(90)
Tb67=trasy(-80)
Tb78=trasz(90)
Tb89=trasy(80)@rotax(-90)@rotaz(t4b)
Tb910=trasz(80)
Tb1011=trasy(-90)
Tb1112=trasz(-80)
Tb1213=trasy(-90)@rotax(90)@rotaz(t5b)
Tb1314=trasz(90)
Tb1415=trasy(80)
Tb1516=trasz(90)
Tb1617=trasy(-80)@rotax(-90)@rotaz(t6b)
Tb1718=trasz(-80)
Tb1819=trasy(-87.5)
Tb1920=trasz(80)
Tb2021=trasy(-87.5)@rotax(90)@rotaz(t7b)
Tb02=Tb01@Tb12
Tb03=Tb02@Tb23
Tb04=Tb03@Tb34
Tb05=Tb04@Tb45
Tb06=Tb05@Tb56
Tb07=Tb06@Tb67
Tb08=Tb07@Tb78
Tb09=Tb08@Tb89
Tb10=Tb09@Tb910
Tb11=Tb10@Tb1011
Tb12=Tb11@Tb1112
Tb13=Tb12@Tb1213
Tb14=Tb13@Tb1314
Tb15=Tb14@Tb1415
Tb16=Tb15@Tb1516
Tb17=Tb16@Tb1617
Tb18=Tb17@Tb1718
Tb19=Tb18@Tb1819
Tb20=Tb19@Tb1920
Tb21=Tb20@Tb2021
print("Izquierdo ++++++")
print(Tb21)
sistemafijo(100)
sistemamovil(T0,100)
sistemamovil(T01,100)
sistemamovil(T05,100)
sistemamovil(T09,100)
sistemamovil(T13,100)
sistemamovil(T17,100)
sistemamovil(T21,100)
sistemamovil(Tb01,100)
sistemamovil(Tb05,100)
sistemamovil(Tb09,100)
sistemamovil(Tb13,100)
sistemamovil(Tb17,100)
sistemamovil(Tb21,100)
ax.plot3D([T0[0,3], Ti[0,3]], [T0[1,3], Ti[1,3]], [T0[2,3], Ti[2,3]],color='red')
ax.plot3D([Ti[0,3], Ti1[0,3]], [Ti[1,3], Ti1[1,3]], [Ti[2,3], Ti1[2,3]],color='red')
ax.plot3D([Ti1[0,3], Ti2[0,3]], [Ti1[1,3], Ti2[1,3]], [Ti1[2,3], Ti2[2,3]],color='red')
ax.plot3D([Ti2[0,3], Ti3[0,3]], [Ti2[1,3], Ti3[1,3]], [Ti2[2,3], Ti3[2,3]],color='red')
ax.plot3D([Ti3[0,3], T01[0,3]], [Ti3[1,3], T01[1,3]], [Ti3[2,3], T01[2,3]],color='red')
ax.plot3D([T01[0,3], T02[0,3]], [T01[1,3], T02[1,3]], [T01[2,3], T02[2,3]],color='red')
ax.plot3D([T02[0,3], T03[0,3]], [T02[1,3], T03[1,3]], [T02[2,3], T03[2,3]],color='red')
ax.plot3D([T03[0,3], T04[0,3]], [T03[1,3], T04[1,3]], [T03[2,3], T04[2,3]],color='red')
ax.plot3D([T04[0,3], T05[0,3]], [T04[1,3], T05[1,3]], [T04[2,3], T05[2,3]],color='red')
ax.plot3D([T05[0,3], T06[0,3]], [T05[1,3], T06[1,3]], [T05[2,3], T06[2,3]],color='red')
ax.plot3D([T06[0,3], T07[0,3]], [T06[1,3], T07[1,3]], [T06[2,3], T07[2,3]],color='red')
ax.plot3D([T07[0,3], T08[0,3]], [T07[1,3], T08[1,3]], [T07[2,3], T08[2,3]],color='red')
ax.plot3D([T08[0,3], T09[0,3]], [T08[1,3], T09[1,3]], [T08[2,3], T09[2,3]],color='red')
ax.plot3D([T09[0,3], T10[0,3]], [T09[1,3], T10[1,3]], [T09[2,3], T10[2,3]],color='red')
ax.plot3D([T10[0,3], T11[0,3]], [T10[1,3], T11[1,3]], [T10[2,3], T11[2,3]],color='red')
ax.plot3D([T11[0,3], T12[0,3]], [T11[1,3], T12[1,3]], [T11[2,3], T12[2,3]],color='red')
ax.plot3D([T12[0,3], T13[0,3]], [T12[1,3], T13[1,3]], [T12[2,3], T13[2,3]],color='red')
ax.plot3D([T13[0,3], T14[0,3]], [T13[1,3], T14[1,3]], [T13[2,3], T14[2,3]],color='red')
ax.plot3D([T14[0,3], T15[0,3]], [T14[1,3], T15[1,3]], [T14[2,3], T15[2,3]],color='red')
ax.plot3D([T15[0,3], T16[0,3]], [T15[1,3], T16[1,3]], [T15[2,3], T16[2,3]],color='red')
ax.plot3D([T16[0,3], T17[0,3]], [T16[1,3], T17[1,3]], [T16[2,3], T17[2,3]],color='red')
ax.plot3D([T17[0,3], T18[0,3]], [T17[1,3], T18[1,3]], [T17[2,3], T18[2,3]],color='red')
ax.plot3D([T18[0,3], T19[0,3]], [T18[1,3], T19[1,3]], [T18[2,3], T19[2,3]],color='red')
ax.plot3D([T19[0,3], T20[0,3]], [T19[1,3], T20[1,3]], [T19[2,3], T20[2,3]],color='red')
ax.plot3D([T20[0,3], T21[0,3]], [T20[1,3], T21[1,3]], [T20[2,3], T21[2,3]],color='red')
ax.plot3D([Ti[0,3], Ti1[0,3]], [Ti[1,3], Ti1[1,3]], [Ti[2,3], Ti1[2,3]],color='red')
ax.plot3D([Ti1[0,3], Ti2[0,3]], [Ti1[1,3], Ti2[1,3]], [Ti1[2,3], Ti2[2,3]],color='red')
ax.plot3D([Ti2[0,3], Tib3[0,3]], [Ti2[1,3], Tib3[1,3]], [Ti2[2,3], Tib3[2,3]],color='red')
ax.plot3D([Tib3[0,3], Tb01[0,3]], [Tib3[1,3], Tb01[1,3]], [Tib3[2,3], Tb01[2,3]],color='red')
ax.plot3D([Tb01[0,3], Tb02[0,3]], [Tb01[1,3], Tb02[1,3]], [Tb01[2,3], Tb02[2,3]],color='red')
ax.plot3D([Tb02[0,3], Tb03[0,3]], [Tb02[1,3], Tb03[1,3]], [Tb02[2,3], Tb03[2,3]],color='red')
ax.plot3D([Tb03[0,3], Tb04[0,3]], [Tb03[1,3], Tb04[1,3]], [Tb03[2,3], Tb04[2,3]],color='red')
ax.plot3D([Tb04[0,3], Tb05[0,3]], [Tb04[1,3], Tb05[1,3]], [Tb04[2,3], Tb05[2,3]],color='red')
ax.plot3D([Tb05[0,3], Tb06[0,3]], [Tb05[1,3], Tb06[1,3]], [Tb05[2,3], Tb06[2,3]],color='red')
ax.plot3D([Tb06[0,3], Tb07[0,3]], [Tb06[1,3], Tb07[1,3]], [Tb06[2,3], Tb07[2,3]],color='red')
ax.plot3D([Tb07[0,3], Tb08[0,3]], [Tb07[1,3], Tb08[1,3]], [Tb07[2,3], Tb08[2,3]],color='red')
ax.plot3D([Tb08[0,3], Tb09[0,3]], [Tb08[1,3], Tb09[1,3]], [Tb08[2,3], Tb09[2,3]],color='red')
ax.plot3D([Tb09[0,3], Tb10[0,3]], [Tb09[1,3], Tb10[1,3]], [Tb09[2,3], Tb10[2,3]],color='red')
ax.plot3D([Tb10[0,3], Tb11[0,3]], [Tb10[1,3], Tb11[1,3]], [Tb10[2,3], Tb11[2,3]],color='red')
ax.plot3D([Tb11[0,3], Tb12[0,3]], [Tb11[1,3], Tb12[1,3]], [Tb11[2,3], Tb12[2,3]],color='red')
ax.plot3D([Tb12[0,3], Tb13[0,3]], [Tb12[1,3], Tb13[1,3]], [Tb12[2,3], Tb13[2,3]],color='red')
ax.plot3D([Tb13[0,3], Tb14[0,3]], [Tb13[1,3], Tb14[1,3]], [Tb13[2,3], Tb14[2,3]],color='red')
ax.plot3D([Tb14[0,3], Tb15[0,3]], [Tb14[1,3], Tb15[1,3]], [Tb14[2,3], Tb15[2,3]],color='red')
ax.plot3D([Tb15[0,3], Tb16[0,3]], [Tb15[1,3], Tb16[1,3]], [Tb15[2,3], Tb16[2,3]],color='red')
ax.plot3D([Tb16[0,3], Tb17[0,3]], [Tb16[1,3], Tb17[1,3]], [Tb16[2,3], Tb17[2,3]],color='red')
ax.plot3D([Tb17[0,3], Tb18[0,3]], [Tb17[1,3], Tb18[1,3]], [Tb17[2,3], Tb18[2,3]],color='red')
ax.plot3D([Tb18[0,3], Tb19[0,3]], [Tb18[1,3], Tb19[1,3]], [Tb18[2,3], Tb19[2,3]],color='red')
ax.plot3D([Tb19[0,3], Tb20[0,3]], [Tb19[1,3], Tb20[1,3]], [Tb19[2,3], Tb20[2,3]],color='red')
ax.plot3D([Tb20[0,3], Tb21[0,3]], [Tb20[1,3], Tb21[1,3]], [Tb20[2,3], Tb21[2,3]],color='red')
def accmotomanv(tb,t1a,t2a,t3a,t4a,t5a,t6a,t7a,t1b,t2b,t3b,t4b,t5b,t6b,t7b,mh=np.eye(4)):
T0=np.eye(4)
Ti=trasz(893.5)
Ti1=Ti@trasx(92.5)@rotaz(tb)
Ti2=Ti1@trasx(100)@trasz(306.5)
Ti3=Ti2@rotax(-90)@rotaz(-180)@rotaz(t1a)@trasz(265)
Tib3=Ti2@rotax(90)@rotaz(-180)@rotaz(t1b)@trasz(265)
T01=Ti3@rotax(-90)@rotaz(t2a)
T12=trasz(-80)
T23=trasy(-90)
T34=trasz(80)
T45=trasy(-90)@rotax(90)@rotaz(t3a)
T56=trasz(90)
T67=trasy(-80)
T78=trasz(90)
T89=trasy(80)@rotax(-90)@rotaz(t4a)
T910=trasz(80)
T1011=trasy(-90)
T1112=trasz(-80)
T1213=trasy(-90)@rotax(90)@rotaz(t5a)
T1314=trasz(90)
T1415=trasy(80)
T1516=trasz(90)
T1617=trasy(-80)@rotax(-90)@rotaz(t6a)
T1718=trasz(-80)
T1819=trasy(-87.5)
T1920=trasz(80)
T2021=trasy(-87.5)@rotax(90)@rotaz(t7a)
T02=T01@T12
T03=T02@T23
T04=T03@T34
T05=T04@T45
T06=T05@T56
T07=T06@T67
T08=T07@T78
T09=T08@T89
T10=T09@T910
T11=T10@T1011
T12=T11@T1112
T13=T12@T1213
T14=T13@T1314
T15=T14@T1415
T16=T15@T1516
T17=T16@T1617
T18=T17@T1718
T19=T18@T1819
T20=T19@T1920
T21=T20@T2021
Tb01=Tib3@rotax(-90)@rotaz(t2b)
Tb12=trasz(-80)
Tb23=trasy(-90)
Tb34=trasz(80)
Tb45=trasy(-90)@rotax(90)@rotaz(t3b)
Tb56=trasz(90)
Tb67=trasy(-80)
Tb78=trasz(90)
Tb89=trasy(80)@rotax(-90)@rotaz(t4b)
Tb910=trasz(80)
Tb1011=trasy(-90)
Tb1112=trasz(-80)
Tb1213=trasy(-90)@rotax(90)@rotaz(t5b)
Tb1314=trasz(90)
Tb1415=trasy(80)
Tb1516=trasz(90)
Tb1617=trasy(-80)@rotax(-90)@rotaz(t6b)
Tb1718=trasz(-80)
Tb1819=trasy(-87.5)
Tb1920=trasz(80)
Tb2021=trasy(-87.5)@rotax(90)@rotaz(t7b)
Tb02=Tb01@Tb12
Tb03=Tb02@Tb23
Tb04=Tb03@Tb34
Tb05=Tb04@Tb45
Tb06=Tb05@Tb56
Tb07=Tb06@Tb67
Tb08=Tb07@Tb78
Tb09=Tb08@Tb89
Tb10=Tb09@Tb910
Tb11=Tb10@Tb1011
Tb12=Tb11@Tb1112
Tb13=Tb12@Tb1213
Tb14=Tb13@Tb1314
Tb15=Tb14@Tb1415
Tb16=Tb15@Tb1516
Tb17=Tb16@Tb1617
Tb18=Tb17@Tb1718
Tb19=Tb18@Tb1819
Tb20=Tb19@Tb1920
Tb21=Tb20@Tb2021
return T21, Tb21
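# Numerical inverse kinematics for both arms: stacks the 3x4 rotation/position residuals of each
# end-effector against the desired poses vd and vd2 (24 equations, 15 joints), builds the Jacobian
# by finite differences with step d, and iterates with the pseudo-inverse. If the residuals do not
# drop below 1e-4 within 100 iterations, the seed angles in sem are returned.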
def motomannewton(vd,vd2,sem,mh=np.eye(4)):
d=0.01
calc=True
j=np.zeros([24,15])
b=np.ones([24,1])
tb=sem[0]
t1=sem[1]
t2=sem[2]
t3=sem[3]
t4=sem[4]
t5=sem[5]
t6=sem[6]
t7=sem[7]
tb1=sem[8]
tb2=sem[9]
tb3=sem[10]
tb4=sem[11]
tb5=sem[12]
tb6=sem[13]
tb7=sem[14]
tbd=tb+d
t1d=t1+d
t2d=t2+d
t3d=t3+d
t4d=t4+d
t5d=t5+d
t6d=t6+d
t7d=t7+d
tb1d=tb1+d
tb2d=tb2+d
tb3d=tb3+d
tb4d=tb4+d
tb5d=tb5+d
tb6d=tb6+d
tb7d=tb7+d
con=0
while calc:
con=con+1
t,t2al=accmotomanv(tb,t1,t2,t3,t4,t5,t6,t7,tb1,tb2,tb3,tb4,tb5,tb6,tb7)
tn=t-vd
tn2=t2al-vd2
b[0,0]=tn[0,0]
b[1,0]=tn[0,1]
b[2,0]=tn[0,2]
b[3,0]=tn[0,3]
b[4,0]=tn[1,0]
b[5,0]=tn[1,1]
b[6,0]=tn[1,2]
b[7,0]=tn[1,3]
b[8,0]=tn[2,0]
b[9,0]=tn[2,1]
b[10,0]=tn[2,2]
b[11,0]=tn[2,3]
b[12,0]=tn2[0,0]
b[13,0]=tn2[0,1]
b[14,0]=tn2[0,2]
b[15,0]=tn2[0,3]
b[16,0]=tn2[1,0]
b[17,0]=tn2[1,1]
b[18,0]=tn2[1,2]
b[19,0]=tn2[1,3]
b[20,0]=tn2[2,0]
b[21,0]=tn2[2,1]
b[22,0]=tn2[2,2]
b[23,0]=tn2[2,3]
tv1=[t1d,t1,t1,t1,t1,t1,t1,t1,t1,t1,t1,t1,t1,t1,t1]
tv2=[t2,t2d,t2,t2,t2,t2,t2,t2,t2,t2,t2,t2,t2,t2,t2]
tv3=[t3,t3,t3d,t3,t3,t3,t3,t3,t3,t3,t3,t3,t3,t3,t3]
tv4=[t4,t4,t4,t4d,t4,t4,t4,t4,t4,t4,t4,t4,t4,t4,t4]
tv5=[t5,t5,t5,t5,t5d,t5,t5,t5,t5,t5,t5,t5,t5,t5,t5]
tv6=[t6,t6,t6,t6,t6,t6d,t6,t6,t6,t6,t6,t6,t6,t6,t6]
tv7=[t7,t7,t7,t7,t7,t7,t7d,t7,t7,t7,t7,t7,t7,t7,t7]
tvb1=[tb1,tb1,tb1,tb1,tb1,tb1,tb1,tb1d,tb1,tb1,tb1,tb1,tb1,tb1,tb1]
tvb2=[tb2,tb2,tb2,tb2,tb2,tb2,tb2,tb2,tb2d,tb2,tb2,tb2,tb2,tb2,tb2]
tvb3=[tb3,tb3,tb3,tb3,tb3,tb3,tb3,tb3,tb3,tb3d,tb3,tb3,tb3,tb3,tb3]
tvb4=[tb4,tb4,tb4,tb4,tb4,tb4,tb4,tb4,tb4,tb4,tb4d,tb4,tb4,tb4,tb4]
tvb5=[tb5,tb5,tb5,tb5,tb5,tb5,tb5,tb5,tb5,tb5,tb5,tb5d,tb5,tb5,tb5]
tvb6=[tb6,tb6,tb6,tb6,tb6,tb6,tb6,tb6,tb6,tb6,tb6,tb6,tb6d,tb6,tb6]
tvb7=[tb7,tb7,tb7,tb7,tb7,tb7,tb7,tb7,tb7,tb7,tb7,tb7,tb7,tb7d,tb7]
tvb=[tb,tb,tb,tb,tb,tb,tb,tb,tb,tb,tb,tb,tb,tb,tbd]
n=0
while n<15:
td,td2=accmotomanv(tvb[n],tv1[n],tv2[n],tv3[n],tv4[n],tv5[n],tv6[n],tv7[n],tvb1[n],tvb2[n],tvb3[n],tvb4[n],tvb5[n],tvb6[n],tvb7[n],mh)
tj=(td-t)/d
tj2=(td2-t2al)/d
j[0,n]=tj[0,0]
j[1,n]=tj[0,1]
j[2,n]=tj[0,2]
j[3,n]=tj[0,3]
j[4,n]=tj[1,0]
j[5,n]=tj[1,1]
j[6,n]=tj[1,2]
j[7,n]=tj[1,3]
j[8,n]=tj[2,0]
j[9,n]=tj[2,1]
j[10,n]=tj[2,2]
j[11,n]=tj[2,3]
j[12,n]=tj2[0,0]
j[13,n]=tj2[0,1]
j[14,n]=tj2[0,2]
j[15,n]=tj2[0,3]
j[16,n]=tj2[1,0]
j[17,n]=tj2[1,1]
j[18,n]=tj2[1,2]
j[19,n]=tj2[1,3]
j[20,n]=tj2[2,0]
j[21,n]=tj2[2,1]
j[22,n]=tj2[2,2]
j[23,n]=tj2[2,3]
n=n+1
R=np.linalg.pinv(j)@(-b)
t1=t1+R[0,0]
t2=t2+R[1,0]
t3=t3+R[2,0]
t4=t4+R[3,0]
t5=t5+R[4,0]
t6=t6+R[5,0]
t7=t7+R[6,0]
tb1=tb1+R[7,0]
tb2=tb2+R[8,0]
tb3=tb3+R[9,0]
tb4=tb4+R[10,0]
tb5=tb5+R[11,0]
tb6=tb6+R[12,0]
tb7=tb7+R[13,0]
tb=tb+R[14,0]
t1=t1%360
t2=t2%360
t3=t3%360
t4=t4%360
t5=t5%360
t6=t6%360
t7=t7%360
tb1=tb1%360
tb2=tb2%360
tb3=tb3%360
tb4=tb4%360
tb5=tb5%360
tb6=tb6%360
tb7=tb7%360
tb=tb%360
t1d=t1+d
t2d=t2+d
t3d=t3+d
t4d=t4+d
t5d=t5+d
t6d=t6+d
t7d=t7+d
tb1d=tb1+d
tb2d=tb2+d
tb3d=tb3+d
tb4d=tb4+d
tb5d=tb5+d
tb6d=tb6+d
tb7d=tb7+d
tbd=tb+d
if (abs(b[0,0])<0.0001 and abs(b[1,0])<0.0001 and abs(b[2,0])<0.0001 and abs(b[3,0])<0.0001 and abs(b[4,0])<0.0001 and abs(b[5,0])<0.0001 and abs(b[6,0])<0.0001 and abs(b[7,0])<0.0001 and abs(b[8,0])<0.0001 and abs(b[9,0])<0.0001 and abs(b[10,0])<0.0001 and abs(b[11,0])<0.0001 and abs(b[12,0])<0.0001 and abs(b[13,0])<0.0001 and abs(b[14,0])<0.0001 and abs(b[15,0])<0.0001 and abs(b[16,0])<0.0001 and abs(b[17,0])<0.0001 and abs(b[18,0])<0.0001 and abs(b[19,0])<0.0001 and abs(b[20,0])<0.0001 and abs(b[21,0])<0.0001 and abs(b[22,0])<0.0001 and abs(b[23,0])<0.0001):
calc=False
if con>100:
calc=False
t1=sem[1]
t2=sem[2]
t3=sem[3]
t4=sem[4]
t5=sem[5]
t6=sem[6]
t7=sem[7]
tb1=sem[8]
tb2=sem[9]
tb3=sem[10]
tb4=sem[11]
tb5=sem[12]
tb6=sem[13]
tb7=sem[14]
tb=sem[0]
tetas=[tb,t1,t2,t3,t4,t5,t6,t7,tb1,tb2,tb3,tb4,tb5,tb6,tb7]
print(con)
print(tetas)
return tetas
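# Duplicate of accmotomanv (same kinematic chain), kept alongside the second Newton solver.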
def accmotomanv2(tb,t1a,t2a,t3a,t4a,t5a,t6a,t7a,t1b,t2b,t3b,t4b,t5b,t6b,t7b,mh=np.eye(4)):
T0=np.eye(4)
Ti=trasz(893.5)
Ti1=Ti@trasx(92.5)@rotaz(tb)
Ti2=Ti1@trasx(100)@trasz(306.5)
Ti3=Ti2@rotax(-90)@rotaz(-180)@rotaz(t1a)@trasz(265)
Tib3=Ti2@rotax(90)@rotaz(-180)@rotaz(t1b)@trasz(265)
T01=Ti3@rotax(-90)@rotaz(t2a)
T12=trasz(-80)
T23=trasy(-90)
T34=trasz(80)
T45=trasy(-90)@rotax(90)@rotaz(t3a)
T56=trasz(90)
T67=trasy(-80)
T78=trasz(90)
T89=trasy(80)@rotax(-90)@rotaz(t4a)
T910=trasz(80)
T1011=trasy(-90)
T1112=trasz(-80)
T1213=trasy(-90)@rotax(90)@rotaz(t5a)
T1314=trasz(90)
T1415=trasy(80)
T1516=trasz(90)
T1617=trasy(-80)@rotax(-90)@rotaz(t6a)
T1718=trasz(-80)
T1819=trasy(-87.5)
T1920=trasz(80)
T2021=trasy(-87.5)@rotax(90)@rotaz(t7a)
T02=T01@T12
T03=T02@T23
T04=T03@T34
T05=T04@T45
T06=T05@T56
T07=T06@T67
T08=T07@T78
T09=T08@T89
T10=T09@T910
T11=T10@T1011
T12=T11@T1112
T13=T12@T1213
T14=T13@T1314
T15=T14@T1415
T16=T15@T1516
T17=T16@T1617
T18=T17@T1718
T19=T18@T1819
T20=T19@T1920
T21=T20@T2021
Tb01=Tib3@rotax(-90)@rotaz(t2b)
Tb12=trasz(-80)
Tb23=trasy(-90)
Tb34=trasz(80)
Tb45=trasy(-90)@rotax(90)@rotaz(t3b)
Tb56=trasz(90)
Tb67=trasy(-80)
Tb78=trasz(90)
Tb89=trasy(80)@rotax(-90)@rotaz(t4b)
Tb910=trasz(80)
Tb1011=trasy(-90)
Tb1112=trasz(-80)
Tb1213=trasy(-90)@rotax(90)@rotaz(t5b)
Tb1314=trasz(90)
Tb1415=trasy(80)
Tb1516=trasz(90)
Tb1617=trasy(-80)@rotax(-90)@rotaz(t6b)
Tb1718=trasz(-80)
Tb1819=trasy(-87.5)
Tb1920=trasz(80)
Tb2021=trasy(-87.5)@rotax(90)@rotaz(t7b)
Tb02=Tb01@Tb12
Tb03=Tb02@Tb23
Tb04=Tb03@Tb34
Tb05=Tb04@Tb45
Tb06=Tb05@Tb56
Tb07=Tb06@Tb67
Tb08=Tb07@Tb78
Tb09=Tb08@Tb89
Tb10=Tb09@Tb910
Tb11=Tb10@Tb1011
Tb12=Tb11@Tb1112
Tb13=Tb12@Tb1213
Tb14=Tb13@Tb1314
Tb15=Tb14@Tb1415
Tb16=Tb15@Tb1516
Tb17=Tb16@Tb1617
Tb18=Tb17@Tb1718
Tb19=Tb18@Tb1819
Tb20=Tb19@Tb1920
Tb21=Tb20@Tb2021
return T21, Tb21
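# Second inverse-kinematics solver: same residual/Jacobian scheme as motomannewton, but with a
# larger finite-difference step (d=10) and angles wrapped to the [-180, 180] range every iteration.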
def motomannewton2(vd,vd2,sem,mh=np.eye(4)):
d=10
calc=True
j=np.zeros([24,15])
b=np.ones([24,1])
tb=sem[0]
t1=sem[1]
t2=sem[2]
t3=sem[3]
t4=sem[4]
t5=sem[5]
t6=sem[6]
t7=sem[7]
tb1=sem[8]
tb2=sem[9]
tb3=sem[10]
tb4=sem[11]
tb5=sem[12]
tb6=sem[13]
tb7=sem[14]
tbd=tb+d
t1d=t1+d
t2d=t2+d
t3d=t3+d
t4d=t4+d
t5d=t5+d
t6d=t6+d
t7d=t7+d
tb1d=tb1+d
tb2d=tb2+d
tb3d=tb3+d
tb4d=tb4+d
tb5d=tb5+d
tb6d=tb6+d
tb7d=tb7+d
con=0
while calc:
con=con+1
t,t2al=accmotomanv(tb,t1,t2,t3,t4,t5,t6,t7,tb1,tb2,tb3,tb4,tb5,tb6,tb7)
#print(t)
#print(t2al)
#print(vd)
#print(vd2)
tn=t-vd
tn2=t2al-vd2
#print(tn)
#print(tn2)
b[0,0]=tn[0,0]
b[1,0]=tn[0,1]
b[2,0]=tn[0,2]
b[3,0]=tn[0,3]
b[4,0]=tn[1,0]
b[5,0]=tn[1,1]
b[6,0]=tn[1,2]
b[7,0]=tn[1,3]
b[8,0]=tn[2,0]
b[9,0]=tn[2,1]
b[10,0]=tn[2,2]
b[11,0]=tn[2,3]
b[12,0]=tn2[0,0]
b[13,0]=tn2[0,1]
b[14,0]=tn2[0,2]
b[15,0]=tn2[0,3]
b[16,0]=tn2[1,0]
b[17,0]=tn2[1,1]
b[18,0]=tn2[1,2]
b[19,0]=tn2[1,3]
b[20,0]=tn2[2,0]
b[21,0]=tn2[2,1]
b[22,0]=tn2[2,2]
b[23,0]=tn2[2,3]
#print(np.around(b,4))
tv1=[t1d,t1,t1,t1,t1,t1,t1,t1,t1,t1,t1,t1,t1,t1,t1]
tv2=[t2,t2d,t2,t2,t2,t2,t2,t2,t2,t2,t2,t2,t2,t2,t2]
tv3=[t3,t3,t3d,t3,t3,t3,t3,t3,t3,t3,t3,t3,t3,t3,t3]
tv4=[t4,t4,t4,t4d,t4,t4,t4,t4,t4,t4,t4,t4,t4,t4,t4]
tv5=[t5,t5,t5,t5,t5d,t5,t5,t5,t5,t5,t5,t5,t5,t5,t5]
tv6=[t6,t6,t6,t6,t6,t6d,t6,t6,t6,t6,t6,t6,t6,t6,t6]
tv7=[t7,t7,t7,t7,t7,t7,t7d,t7,t7,t7,t7,t7,t7,t7,t7]
tvb1=[tb1,tb1,tb1,tb1,tb1,tb1,tb1,tb1d,tb1,tb1,tb1,tb1,tb1,tb1,tb1]
tvb2=[tb2,tb2,tb2,tb2,tb2,tb2,tb2,tb2,tb2d,tb2,tb2,tb2,tb2,tb2,tb2]
tvb3=[tb3,tb3,tb3,tb3,tb3,tb3,tb3,tb3,tb3,tb3d,tb3,tb3,tb3,tb3,tb3]
tvb4=[tb4,tb4,tb4,tb4,tb4,tb4,tb4,tb4,tb4,tb4,tb4d,tb4,tb4,tb4,tb4]
tvb5=[tb5,tb5,tb5,tb5,tb5,tb5,tb5,tb5,tb5,tb5,tb5,tb5d,tb5,tb5,tb5]
tvb6=[tb6,tb6,tb6,tb6,tb6,tb6,tb6,tb6,tb6,tb6,tb6,tb6,tb6d,tb6,tb6]
tvb7=[tb7,tb7,tb7,tb7,tb7,tb7,tb7,tb7,tb7,tb7,tb7,tb7,tb7,tb7d,tb7]
tvb=[tb,tb,tb,tb,tb,tb,tb,tb,tb,tb,tb,tb,tb,tb,tbd]
n=0
while n<15:
td,td2=accmotomanv(tvb[n],tv1[n],tv2[n],tv3[n],tv4[n],tv5[n],tv6[n],tv7[n],tvb1[n],tvb2[n],tvb3[n],tvb4[n],tvb5[n],tvb6[n],tvb7[n],mh)
tj=(td-t)/d
tj2=(td2-t2al)/d
j[0,n]=tj[0,0]
j[1,n]=tj[0,1]
j[2,n]=tj[0,2]
j[3,n]=tj[0,3]
j[4,n]=tj[1,0]
j[5,n]=tj[1,1]
j[6,n]=tj[1,2]
j[7,n]=tj[1,3]
j[8,n]=tj[2,0]
j[9,n]=tj[2,1]
j[10,n]=tj[2,2]
j[11,n]=tj[2,3]
j[12,n]=tj2[0,0]
j[13,n]=tj2[0,1]
j[14,n]=tj2[0,2]
j[15,n]=tj2[0,3]
j[16,n]=tj2[1,0]
j[17,n]=tj2[1,1]
j[18,n]=tj2[1,2]
j[19,n]=tj2[1,3]
j[20,n]=tj2[2,0]
j[21,n]=tj2[2,1]
j[22,n]=tj2[2,2]
j[23,n]=tj2[2,3]
n=n+1
R=np.linalg.pinv(j)@(-b)
#print(np.around(j,4))
#print(np.around(R,4))
t1=t1+R[0,0]
t2=t2+R[1,0]
t3=t3+R[2,0]
t4=t4+R[3,0]
t5=t5+R[4,0]
t6=t6+R[5,0]
t7=t7+R[6,0]
tb1=tb1+R[7,0]
tb2=tb2+R[8,0]
tb3=tb3+R[9,0]
tb4=tb4+R[10,0]
tb5=tb5+R[11,0]
tb6=tb6+R[12,0]
tb7=tb7+R[13,0]
tb=tb+R[14,0]
t1=t1%360
t2=t2%360
t3=t3%360
t4=t4%360
t5=t5%360
t6=t6%360
t7=t7%360
tb1=tb1%360
tb2=tb2%360
tb3=tb3%360
tb4=tb4%360
tb5=tb5%360
tb6=tb6%360
tb7=tb7%360
tb=tb%360
if tb>180:
tb=-(360-tb)
if tb<-180:
tb=360+tb
if t1>180:
t1=-(360-t1)
if t1<-180:
t1=360+t1
if t2>180:
t2=-(360-t2)
if t2<-180:
t2=360+t2
if t3>180:
t3=-(360-t3)
if t3<-180:
t3=360+t3
if t4>180:
t4=-(360-t4)
if t4<-180:
t4=360+t4
if t5>180:
t5=-(360-t5)
if t5<-180:
t5=360+t5
if t6>180:
t6=-(360-t6)
if t6<-180:
t6=360+t6
if t7>180:
t7=-(360-t7)
if t7<-180:
t7=360+t7
if tb1>180:
tb1=-(360-tb1)
if tb1<-180:
tb1=360+tb1
if tb2>180:
tb2=-(360-tb2)
if tb2<-180:
tb2=360+tb2
if tb3>180:
tb3=-(360-tb3)
if tb3<-180:
tb3=360+tb3
if tb4>180:
tb4=-(360-tb4)
if tb4<-180:
tb4=360+tb4
if tb5>180:
tb5=-(360-tb5)
if tb5<-180:
tb5=360+tb5
if tb6>180:
tb6=-(360-tb6)
if tb6<-180:
tb6=360+tb6
if tb7>180:
tb7=-(360-tb7)
if tb7<-180:
tb7=360+tb7
t1d=t1+d
t2d=t2+d
t3d=t3+d
t4d=t4+d
t5d=t5+d
t6d=t6+d
t7d=t7+d
tb1d=tb1+d
tb2d=tb2+d
tb3d=tb3+d
tb4d=tb4+d
tb5d=tb5+d
tb6d=tb6+d
tb7d=tb7+d
tbd=tb+d
if (abs(b[0,0])<0.0001 and abs(b[1,0])<0.0001 and abs(b[2,0])<0.0001 and abs(b[3,0])<0.0001 and abs(b[4,0])<0.0001 and abs(b[5,0])<0.0001 and abs(b[6,0])<0.0001 and abs(b[7,0])<0.0001 and abs(b[8,0])<0.0001 and abs(b[9,0])<0.0001 and abs(b[10,0])<0.0001 and abs(b[11,0])<0.0001 and abs(b[12,0])<0.0001 and abs(b[13,0])<0.0001 and abs(b[14,0])<0.0001 and abs(b[15,0])<0.0001 and abs(b[16,0])<0.0001 and abs(b[17,0])<0.0001 and abs(b[18,0])<0.0001 and abs(b[19,0])<0.0001 and abs(b[20,0])<0.0001 and abs(b[21,0])<0.0001 and abs(b[22,0])<0.0001 and abs(b[23,0])<0.0001):
calc=False
if con>100:
calc=False
t1=sem[1]
t2=sem[2]
t3=sem[3]
t4=sem[4]
t5=sem[5]
t6=sem[6]
t7=sem[7]
tb1=sem[8]
tb2=sem[9]
tb3=sem[10]
tb4=sem[11]
tb5=sem[12]
tb6=sem[13]
tb7=sem[14]
tb=sem[0]
tetas=[tb,t1,t2,t3,t4,t5,t6,t7,tb1,tb2,tb3,tb4,tb5,tb6,tb7]
print(con)
print(tetas)
return tetas
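# Joint-space move: wraps both configurations to [-180, 180], splits the difference into 30 equal
# joint increments, and redraws the robot at every step. Returns the final joint values.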
def motomanmovej(p1,p2):
n=1
y=0
while y<15:
if p1[y]>180:
p1[y]=-(360-p1[y])
if p1[y]<-180:
p1[y]=360+p1[y]
y=y+1
y=0
while y<15:
if p2[y]>180:
p2[y]=-(360-p2[y])
if p2[y]<-180:
p2[y]=360+p2[y]
y=y+1
tetas=[p1[0],p1[1],p1[2],p1[3],p1[4],p1[5],p1[6],p1[7],p1[8],p1[9],p1[10],p1[11],p1[12],p1[13],p1[14]]
paso=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
paso[0]=(p2[0]-p1[0])/30
paso[1]=(p2[1]-p1[1])/30
paso[2]=(p2[2]-p1[2])/30
paso[3]=(p2[3]-p1[3])/30
paso[4]=(p2[4]-p1[4])/30
paso[5]=(p2[5]-p1[5])/30
paso[6]=(p2[6]-p1[6])/30
paso[7]=(p2[7]-p1[7])/30
paso[8]=(p2[8]-p1[8])/30
paso[9]=(p2[9]-p1[9])/30
paso[10]=(p2[10]-p1[10])/30
paso[11]=(p2[11]-p1[11])/30
paso[12]=(p2[12]-p1[12])/30
paso[13]=(p2[13]-p1[13])/30
paso[14]=(p2[14]-p1[14])/30
while n<31:
tetas[0]=tetas[0]+paso[0]
tetas[1]=tetas[1]+paso[1]
tetas[2]=tetas[2]+paso[2]
tetas[3]=tetas[3]+paso[3]
tetas[4]=tetas[4]+paso[4]
tetas[5]=tetas[5]+paso[5]
tetas[6]=tetas[6]+paso[6]
tetas[7]=tetas[7]+paso[7]
tetas[8]=tetas[8]+paso[8]
tetas[9]=tetas[9]+paso[9]
tetas[10]=tetas[10]+paso[10]
tetas[11]=tetas[11]+paso[11]
tetas[12]=tetas[12]+paso[12]
tetas[13]=tetas[13]+paso[13]
tetas[14]=tetas[14]+paso[14]
n=n+1
ax.cla()
setaxis(1000)
#print(tetar)
accmotoman(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
dibujar()
return tetas
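# Cartesian straight-line move for both arms: interpolates the two end-effector positions linearly
# and their orientations through the Euler deltas from obtangrot, then solves the inverse
# kinematics (motomannewton2) at each of the 30 intermediate poses and redraws the robot.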
def motomanmovel(tetas,tetas2):
con=0
n=0
sem=tetas.copy()
mtr1,mtr2=accmotomanv(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
mtr12,mtr22=accmotomanv(tetas2[0],tetas2[1],tetas2[2],tetas2[3],tetas2[4],tetas2[5],tetas2[6],tetas2[7],tetas2[8],tetas2[9],tetas2[10],tetas2[11],tetas2[12],tetas2[13],tetas2[14])
pn=mtr1
pn2=mtr2
p1xd=mtr1[0,3]
p1yd=mtr1[1,3]
p1zd=mtr1[2,3]
p1xi=mtr2[0,3]
p1yi=mtr2[1,3]
p1zi=mtr2[2,3]
p2xd=mtr12[0,3]
p2yd=mtr12[1,3]
p2zd=mtr12[2,3]
p2xi=mtr22[0,3]
p2yi=mtr22[1,3]
p2zi=mtr22[2,3]
dxd=p2xd-p1xd
dyd=p2yd-p1yd
dzd=p2zd-p1zd
dxi=p2xi-p1xi
dyi=p2yi-p1yi
dzi=p2zi-p1zi
angrot1=obtangrot(mtr12,mtr1)
angrot1[0]=angrot1[0]/30
angrot1[1]=angrot1[1]/30
angrot1[2]=angrot1[2]/30
angroti1=[0,0,0]
angrot2=obtangrot(mtr22,mtr2)
angrot2[0]=angrot2[0]/30
angrot2[1]=angrot2[1]/30
angrot2[2]=angrot2[2]/30
angroti2=[0,0,0]
while n<1:
pn=mtr1@rotaz(angroti1[2])@rotay(angroti1[1])@rotax(angroti1[0])
angroti1[0]=angroti1[0]+angrot1[0]
angroti1[1]=angroti1[1]+angrot1[1]
angroti1[2]=angroti1[2]+angrot1[2]
xd=p1xd+dxd*n
yd=p1yd+dyd*n
zd=p1zd+dzd*n
pn2=mtr2@rotaz(angroti2[2])@rotay(angroti2[1])@rotax(angroti2[0])
angroti2[0]=angroti2[0]+angrot2[0]
angroti2[1]=angroti2[1]+angrot2[1]
angroti2[2]=angroti2[2]+angrot2[2]
xi=p1xi+dxi*n
yi=p1yi+dyi*n
zi=p1zi+dzi*n
n+=1/30
con+=1
pn[0,3]=xd
pn[1,3]=yd
pn[2,3]=zd
pn2[0,3]=xi
pn2[1,3]=yi
pn2[2,3]=zi
tetas=motomannewton2(pn,pn2,sem)
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14]]
ax.cla()
setaxis(1000)
accmotoman(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
dibujar()
return tetas
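# Forward kinematics and wireframe drawing of the 6-axis ABB IRB 1600 arm; returns the flange pose t08.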
def IRB1600(t1,t2,t3,t4,t5,t6):
t0=np.eye(4)
t01=rotaz(t1)#
t12=trasz(486.5)@rotax(-90)#
t23=trasx(150)@rotaz(t2)
t34=trasy(-700)@rotaz(90)@rotaz(t3)#
t45=trasx(-110)@rotax(90)@rotaz(t4)#
t56=trasz(640)@rotax(-90)@rotaz(t5)#
t67=trasy(-150)@rotax(90)@rotaz(t6)
t78=trasz(50)#
t02=t01@t12
t03=t02@t23
t04=t03@t34
t05=t04@t45
t06=t05@t56
t07=t06@t67
t08=t07@t78
sistemafijo(100)
sistemamovil(t01,100)
#sistemamovil(t02,100)
sistemamovil(t03,100)
sistemamovil(t04,100)
sistemamovil(t05,100)
sistemamovil(t06,100)
sistemamovil(t07,100)
#sistemamovil(t08,100)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
ax.plot3D([t03[0,3],t04[0,3]],[t03[1,3],t04[1,3]],[t03[2,3],t04[2,3]],color='red')
ax.plot3D([t04[0,3],t05[0,3]],[t04[1,3],t05[1,3]],[t04[2,3],t05[2,3]],color='red')
ax.plot3D([t05[0,3],t06[0,3]],[t05[1,3],t06[1,3]],[t05[2,3],t06[2,3]],color='red')
ax.plot3D([t06[0,3],t07[0,3]],[t06[1,3],t07[1,3]],[t06[2,3],t07[2,3]],color='red')
ax.plot3D([t07[0,3],t08[0,3]],[t07[1,3],t08[1,3]],[t07[2,3],t08[2,3]],color='red')
return(t08)
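# Drawing-free forward kinematics of the IRB 1600; mh is an optional tool transform appended to the flange pose.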
def IRB1600v(t1,t2,t3,t4,t5,t6,mh=np.eye(4)):
t0=np.eye(4)
t01=rotaz(t1)#
t12=trasz(486.5)@rotax(-90)#
t23=trasx(150)@rotaz(t2)
t34=trasy(-700)@rotaz(90)@rotaz(t3)#
t45=trasx(-110)@rotax(90)@rotaz(t4)#
t56=trasz(640)@rotax(-90)@rotaz(t5)#
t67=trasy(-150)@rotax(90)@rotaz(t6)
t78=trasz(50)#
t02=t01@t12
t03=t02@t23
t04=t03@t34
t05=t04@t45
t06=t05@t56
t07=t06@t67
t08=t07@t78
t08=t08@mh
#print(t08)
return t08
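# Newton-Raphson inverse kinematics for the IRB 1600 (12 residuals, 6 joints). Joint limits are
# enforced by kicking the offending joint 100 degrees and forcing another iteration whenever a
# converged solution falls outside its range.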
def IRB1600newton(vd,sem,mh=np.eye(4)):
d=0.01
calc=True
j=np.zeros([12,6])
b=np.ones([12,1])
t1=sem[0]
t2=sem[1]
t3=sem[2]
t4=sem[3]
t5=sem[4]
t6=sem[5]
t1d=t1+d
t2d=t2+d
t3d=t3+d
t4d=t4+d
t5d=t5+d
t6d=t6+d
con=0
while calc:
con=con+1
t=IRB1600v(t1,t2,t3,t4,t5,t6,mh)
tn=t-vd
b[0,0]=tn[0,0]
b[1,0]=tn[0,1]
b[2,0]=tn[0,2]
b[3,0]=tn[0,3]
b[4,0]=tn[1,0]
b[5,0]=tn[1,1]
b[6,0]=tn[1,2]
b[7,0]=tn[1,3]
b[8,0]=tn[2,0]
b[9,0]=tn[2,1]
b[10,0]=tn[2,2]
b[11,0]=tn[2,3]
tv1=[t1d,t1,t1,t1,t1,t1]
tv2=[t2,t2d,t2,t2,t2,t2]
tv3=[t3,t3,t3d,t3,t3,t3]
tv4=[t4,t4,t4,t4d,t4,t4]
tv5=[t5,t5,t5,t5,t5d,t5]
tv6=[t6,t6,t6,t6,t6,t6d]
n=0
while n<6:
td=IRB1600v(tv1[n],tv2[n],tv3[n],tv4[n],tv5[n],tv6[n],mh)
tj=(td-t)/d
j[0,n]=tj[0,0]
j[1,n]=tj[0,1]
j[2,n]=tj[0,2]
j[3,n]=tj[0,3]
j[4,n]=tj[1,0]
j[5,n]=tj[1,1]
j[6,n]=tj[1,2]
j[7,n]=tj[1,3]
j[8,n]=tj[2,0]
j[9,n]=tj[2,1]
j[10,n]=tj[2,2]
j[11,n]=tj[2,3]
n=n+1
R=np.linalg.pinv(j)@(-b)
t1=t1+R[0,0]
t2=t2+R[1,0]
t3=t3+R[2,0]
t4=t4+R[3,0]
t5=t5+R[4,0]
t6=t6+R[5,0]
t1=t1%360
t2=t2%360
t3=t3%360
t4=t4%360
t5=t5%360
t6=t6%360
if t1>180:
t1=-(360-t1)
if t1<-180:
t1=360+t1
if t2>150:
t2=-(360-t2)
if t2<-90:
t2=360+t2
if t3>79:
t3=-(360-t3)
if t3<-238:
t3=360+t3
if t4>155:
t4=-(360-t4)
if t4<-155:
t4=360+t4
if t5>135:
t5=-(360-t5)
if t5<-90:
t5=360+t5
if t6>200:
t6=-(360-t6)
if t6<-200:
t6=360+t6
if t2>150 or t2<-90:
if (abs(b[0,0])<0.1 and abs(b[1,0])<0.1 and abs(b[2,0])<0.1 and abs(b[3,0])<0.1 and abs(b[4,0])<0.1 and abs(b[5,0])<0.1 and abs(b[6,0])<0.1 and abs(b[7,0])<0.1 and abs(b[8,0])<0.1 and abs(b[9,0])<0.1 and abs(b[10,0])<0.1 and abs(b[11,0])<0.1):
print("**********")
print("**********")
print("T2 fuera de limite")
print("**********")
print("**********")
t2=t2+100
b[0,0]=1
if t3>79 or t3<-238:
if (abs(b[0,0])<0.1 and abs(b[1,0])<0.1 and abs(b[2,0])<0.1 and abs(b[3,0])<0.1 and abs(b[4,0])<0.1 and abs(b[5,0])<0.1 and abs(b[6,0])<0.1 and abs(b[7,0])<0.1 and abs(b[8,0])<0.1 and abs(b[9,0])<0.1 and abs(b[10,0])<0.1 and abs(b[11,0])<0.1):
print("**********")
print("**********")
print("T3 fuera de limite")
print("**********")
print("**********")
t3=t3+100
b[0,0]=1
if t4>155 or t4<-155:
if (abs(b[0,0])<0.1 and abs(b[1,0])<0.1 and abs(b[2,0])<0.1 and abs(b[3,0])<0.1 and abs(b[4,0])<0.1 and abs(b[5,0])<0.1 and abs(b[6,0])<0.1 and abs(b[7,0])<0.1 and abs(b[8,0])<0.1 and abs(b[9,0])<0.1 and abs(b[10,0])<0.1 and abs(b[11,0])<0.1):
print("**********")
print("**********")
print("T4 fuera de limite")
print("**********")
print("**********")
t4=t4+100
b[0,0]=1
print()
if t5>135 or t5<-90:
if (abs(b[0,0])<0.1 and abs(b[1,0])<0.1 and abs(b[2,0])<0.1 and abs(b[3,0])<0.1 and abs(b[4,0])<0.1 and abs(b[5,0])<0.1 and abs(b[6,0])<0.1 and abs(b[7,0])<0.1 and abs(b[8,0])<0.1 and abs(b[9,0])<0.1 and abs(b[10,0])<0.1 and abs(b[11,0])<0.1):
print("**********")
print("**********")
print("T5 fuera de limite")
print("**********")
print("**********")
t5=t5+100
b[0,0]=1
t1d=t1+d
t2d=t2+d
t3d=t3+d
t4d=t4+d
t5d=t5+d
t6d=t6+d
if (abs(b[0,0])<0.0001 and abs(b[1,0])<0.0001 and abs(b[2,0])<0.0001 and abs(b[3,0])<0.0001 and abs(b[4,0])<0.0001 and abs(b[5,0])<0.0001 and abs(b[6,0])<0.0001 and abs(b[7,0])<0.0001 and abs(b[8,0])<0.0001 and abs(b[9,0])<0.0001 and abs(b[10,0])<0.0001 and abs(b[11,0])<0.0001):
calc=False
if con>100:
calc=False
t1=sem[0]
t2=sem[1]
t3=sem[2]
t4=sem[3]
t5=sem[4]
t6=sem[5]
tetas=[t1,t2,t3,t4,t5,t6]
print(con)
print(tetas)
return tetas
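# Drawing-free forward kinematics of the UR5; mh is an optional tool transform and mb an optional base transform.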
def ur5v(t1,t2,t3,t4,t5,t6,mh=np.eye(4),mb=np.eye(4)):
t0=np.eye(4)
t01=mb@rotaz(t1)@trasz(89.2)#
t12=trasy(-134.2)@rotax(90)@rotaz(t2)#
t23=trasy(425)
t34=trasz(-118.45)@rotaz(t3)#
t45=trasx(392.25)@rotaz(t4)#
t56=trasz(94.75)@rotax(-90)@rotaz(t5)#
t67=trasz(94.75)
t78=trasx(82.5)@rotay(90)@rotaz(-90)@rotaz(t6)#
t02=t01@t12
t03=t02@t23
t04=t03@t34
t05=t04@t45
t06=t05@t56
t07=t06@t67
t08=t07@t78
t08=t08@mh
return t08
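# Newton-Raphson inverse kinematics for the UR5: 12 residuals (rotation and position of the desired
# pose vd), finite-difference Jacobian, pseudo-inverse update, and the seed returned after 100 iterations.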
def ur5newton(vd,sem,mh=np.eye(4),mb=np.eye(4)):
d=0.01
calc=True
j=np.zeros([12,6])
b=np.ones([12,1])
t1=sem[0]
t2=sem[1]
t3=sem[2]
t4=sem[3]
t5=sem[4]
t6=sem[5]
t1d=t1+d
t2d=t2+d
t3d=t3+d
t4d=t4+d
t5d=t5+d
t6d=t6+d
con=0
while calc:
con=con+1
t=ur5v(t1,t2,t3,t4,t5,t6,mh,mb)
tn=t-vd
b[0,0]=tn[0,0]
b[1,0]=tn[0,1]
b[2,0]=tn[0,2]
b[3,0]=tn[0,3]
b[4,0]=tn[1,0]
b[5,0]=tn[1,1]
b[6,0]=tn[1,2]
b[7,0]=tn[1,3]
b[8,0]=tn[2,0]
b[9,0]=tn[2,1]
b[10,0]=tn[2,2]
b[11,0]=tn[2,3]
tv1=[t1d,t1,t1,t1,t1,t1]
tv2=[t2,t2d,t2,t2,t2,t2]
tv3=[t3,t3,t3d,t3,t3,t3]
tv4=[t4,t4,t4,t4d,t4,t4]
tv5=[t5,t5,t5,t5,t5d,t5]
tv6=[t6,t6,t6,t6,t6,t6d]
n=0
while n<6:
td=ur5v(tv1[n],tv2[n],tv3[n],tv4[n],tv5[n],tv6[n],mh,mb)
tj=(td-t)/d
j[0,n]=tj[0,0]
j[1,n]=tj[0,1]
j[2,n]=tj[0,2]
j[3,n]=tj[0,3]
j[4,n]=tj[1,0]
j[5,n]=tj[1,1]
j[6,n]=tj[1,2]
j[7,n]=tj[1,3]
j[8,n]=tj[2,0]
j[9,n]=tj[2,1]
j[10,n]=tj[2,2]
j[11,n]=tj[2,3]
n=n+1
R=np.linalg.pinv(j)@(-b)
t1=t1+R[0,0]
t2=t2+R[1,0]
t3=t3+R[2,0]
t4=t4+R[3,0]
t5=t5+R[4,0]
t6=t6+R[5,0]
t1=t1%360
t2=t2%360
t3=t3%360
t4=t4%360
t5=t5%360
t6=t6%360
if t1>180:
t1=-(360-t1)
if t1<-180:
t1=360+t1
if t2>180:
t2=-(360-t2)
if t2<-180:
t2=360+t2
if t3>180:
t3=-(360-t3)
if t3<-180:
t3=360+t3
if t4>180:
t4=-(360-t4)
if t4<-180:
t4=360+t4
if t5>180:
t5=-(360-t5)
if t5<-180:
t5=360+t5
if t6>180:
t6=-(360-t6)
if t6<-180:
t6=360+t6
t1d=t1+d
t2d=t2+d
t3d=t3+d
t4d=t4+d
t5d=t5+d
t6d=t6+d
if (abs(b[0,0])<0.0001 and abs(b[1,0])<0.0001 and abs(b[2,0])<0.0001 and abs(b[3,0])<0.0001 and abs(b[4,0])<0.0001 and abs(b[5,0])<0.0001 and abs(b[6,0])<0.0001 and abs(b[7,0])<0.0001 and abs(b[8,0])<0.0001 and abs(b[9,0])<0.0001 and abs(b[10,0])<0.0001 and abs(b[11,0])<0.0001):
calc=False
if con>100:
calc=False
t1=sem[0]
t2=sem[1]
t3=sem[2]
t4=sem[3]
t5=sem[4]
t6=sem[5]
tetas=[t1,t2,t3,t4,t5,t6]
print(con)
print(tetas)
return tetas
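# obtangrotv applies Z-Y-X Euler rotations to the frame p1ea; obtangrot inverts that relation,
# solving numerically for the three angles that rotate p1ea onto p2 (used to interpolate
# orientations in the Cartesian moves).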
def obtangrotv(t1,t2,t3,p1ea):
#t0=rotax(p1ea[0])@rotay(p1ea[1])@rotaz(p1ea[2])
t=p1ea@rotaz(t3)@rotay(t2)@rotax(t1)
return t
def obtangrot(p2,p1ea):
d=0.01
calc=True
j=np.zeros([9,3])
b=np.ones([9,1])
t1=0
t2=0
t3=0
t1d=t1+d
t2d=t2+d
t3d=t3+d
con=0
while calc:
con=con+1
t=obtangrotv(t1,t2,t3,p1ea)
tn=t-p2
b[0,0]=tn[0,0]
b[1,0]=tn[0,1]
b[2,0]=tn[0,2]
b[3,0]=tn[1,0]
b[4,0]=tn[1,1]
b[5,0]=tn[1,2]
b[6,0]=tn[2,0]
b[7,0]=tn[2,1]
b[8,0]=tn[2,2]
tv1=[t1d,t1,t1]
tv2=[t2,t2d,t2]
tv3=[t3,t3,t3d]
n=0
while n<3:
td=obtangrotv(tv1[n],tv2[n],tv3[n],p1ea)
tj=(td-t)/d
j[0,n]=tj[0,0]
j[1,n]=tj[0,1]
j[2,n]=tj[0,2]
j[3,n]=tj[1,0]
j[4,n]=tj[1,1]
j[5,n]=tj[1,2]
j[6,n]=tj[2,0]
j[7,n]=tj[2,1]
j[8,n]=tj[2,2]
n=n+1
R=np.linalg.pinv(j)@(-b)
t1=t1+R[0,0]
t2=t2+R[1,0]
t3=t3+R[2,0]
t1=t1%360
t2=t2%360
t3=t3%360
if t1>180:
t1=-(360-t1)
if t1<-180:
t1=360+t1
if t2>180:
t2=-(360-t2)
if t2<-180:
t2=360+t2
if t3>180:
t3=-(360-t3)
if t3<-180:
t3=360+t3
t1d=t1+d
t2d=t2+d
t3d=t3+d
if (abs(b[0,0])<0.0001 and abs(b[1,0])<0.0001 and abs(b[2,0])<0.0001 and abs(b[3,0])<0.0001 and abs(b[4,0])<0.0001 and abs(b[5,0])<0.0001 and abs(b[6,0])<0.0001 and abs(b[7,0])<0.0001 and abs(b[8,0])<0.0001):
calc=False
if con>100:
calc=False
t1=0
t2=0
t3=0
tetas=[t1,t2,t3]
print(con)
print(tetas)
return tetas
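# Tool model ("herramienta 1"): herramienta1v returns the fixed tool transform (optionally rotated
# by z about the flange and extended by the offset so), and herramienta1 draws the same chain
# attached to the robot flange pose mr.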
def herramienta1v(z=0,so=6.4):
t0=np.eye(4)
t01=rotaz(z)@trasz(100)#
t12=trasy(-39)#
t23=trasz(30)
t34=rotax(-6.21)@trasy(-79)#
t45=trasz(225)#
t56=rotax(-45)@trasz(111)#
t67=trasz(so)
t02=t01@t12
t03=t02@t23
t04=t03@t34
t05=t04@t45
t06=t05@t56
t07=t06@t67
return t07
def herramienta1(mr,z=0,so=6.4):
t0=np.eye(4)
t01=mr@rotaz(z)@trasz(100)#
t12=trasy(-39)#
t23=trasz(30)
t34=rotax(-6.21)@trasy(-79)#
t45=trasz(225)#
t56=rotax(-45)@trasz(111)#
t67=trasz(so)
t02=t01@t12
t03=t02@t23
t04=t03@t34
t05=t04@t45
t06=t05@t56
t07=t06@t67
sistemamovil(t01,100)
sistemamovil(t02,100)
sistemamovil(t03,100)
sistemamovil(t04,100)
sistemamovil(t05,100)
sistemamovil(t06,100)
sistemamovil(t07,100)
ax.plot3D([mr[0,3],t01[0,3]],[mr[1,3],t01[1,3]],[mr[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
ax.plot3D([t03[0,3],t04[0,3]],[t03[1,3],t04[1,3]],[t03[2,3],t04[2,3]],color='red')
ax.plot3D([t04[0,3],t05[0,3]],[t04[1,3],t05[1,3]],[t04[2,3],t05[2,3]],color='red')
ax.plot3D([t05[0,3],t06[0,3]],[t05[1,3],t06[1,3]],[t05[2,3],t06[2,3]],color='red')
ax.plot3D([t06[0,3],t07[0,3]],[t06[1,3],t07[1,3]],[t06[2,3],t07[2,3]],color='red')
def animsistemamovilx(t):
n=0
while n<t:
ax.cla()
setaxis(-1,1,-1,1,-1,1)
r=rotax(n)
sistemafijo()
sistemamovil(r)
n=n+1
dibujar()
def animsistemamovily(t):
n=0
while n<t:
ax.cla()
setaxis(-1,1,-1,1,-1,1)
r=rotay(n)
sistemafijo()
sistemamovil(r)
n=n+1
dibujar()
def animsistemamovilz(t):
n=0
while n<t:
ax.cla()
setaxis()
r=rotaz(n)
sistemafijo()
sistemamovil(r)
n=n+1
dibujar()
def muevemoscax(t):
n=0
while n<t:
ax.cla()
setaxis()
r=rotax(n)
ax.scatter(0,0.4,0.6,'o')
Auvw=np.array([[0],[0.4],[0.6]])
Axyz=np.dot(r,Auvw)
x=Axyz[0,0]
y=Axyz[1,0]
z=Axyz[2,0]
sistemafijo()
sistemamovil(r)
ax.scatter(x,y,z,'o')
n=n+1
dibujar()
def muevemoscay(t):
n=0
while n<t:
ax.cla()
setaxis()
r=rotay(n)
ax.scatter(0,0.4,0.6,'o')
Auvw=np.array([[0],[0.4],[0.6]])
Axyz=np.dot(r,Auvw)
x=Axyz[0,0]
y=Axyz[1,0]
z=Axyz[2,0]
sistemafijo()
sistemamovil(r)
ax.scatter(x,y,z,'o')
n=n+1
dibujar()
def muevemoscaz(t):
n=0
while n<t:
ax.cla()
setaxis()
r=rotaz(n)
ax.scatter(0,0.4,0.6,'o')
Auvw=np.array([[0],[0.4],[0.6]])
Axyz=np.dot(r,Auvw)
x=Axyz[0,0]
y=Axyz[1,0]
z=Axyz[2,0]
sistemafijo()
sistemamovil(r)
ax.scatter(x,y,z,'o')
n=n+1
dibujar()
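# Draws the 12 edges of a d x w x l box whose corners are transformed by the homogeneous matrix r.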
def dibujarcaja(d=1,w=1,l=1,r=0):
#setaxis()
a1=np.array([[0],[0],[0],[1]], dtype=object)
b1=np.array([[0],[0],[l],[1]], dtype=object)
c1=np.array([[0],[w],[l],[1]], dtype=object)
d1=np.array([[0],[w],[0],[1]], dtype=object)
e1=np.array([[d],[0],[0],[1]], dtype=object)
f1=np.array([[d],[0],[l],[1]], dtype=object)
g1=np.array([[d],[w],[l],[1]], dtype=object)
h1=np.array([[d],[w],[0],[1]], dtype=object)
a=np.dot(r,a1)
b=np.dot(r,b1)
c=np.dot(r,c1)
d=np.dot(r,d1)
e=np.dot(r,e1)
f=np.dot(r,f1)
g=np.dot(r,g1)
h=np.dot(r,h1)
ax.plot3D([a[0,0],b[0,0]],[a[1,0],b[1,0]],[a[2,0],b[2,0]],color='red')
ax.plot3D([a[0,0],d[0,0]],[a[1,0],d[1,0]],[a[2,0],d[2,0]],color='red')
ax.plot3D([a[0,0],e[0,0]],[a[1,0],e[1,0]],[a[2,0],e[2,0]],color='red')
ax.plot3D([b[0,0],c[0,0]],[b[1,0],c[1,0]],[b[2,0],c[2,0]],color='red')
ax.plot3D([b[0,0],f[0,0]],[b[1,0],f[1,0]],[b[2,0],f[2,0]],color='red')
ax.plot3D([c[0,0],d[0,0]],[c[1,0],d[1,0]],[c[2,0],d[2,0]],color='red')
ax.plot3D([c[0,0],g[0,0]],[c[1,0],g[1,0]],[c[2,0],g[2,0]],color='red')
ax.plot3D([d[0,0],h[0,0]],[d[1,0],h[1,0]],[d[2,0],h[2,0]],color='red')
ax.plot3D([e[0,0],h[0,0]],[e[1,0],h[1,0]],[e[2,0],h[2,0]],color='red')
ax.plot3D([e[0,0],f[0,0]],[e[1,0],f[1,0]],[e[2,0],f[2,0]],color='red')
ax.plot3D([g[0,0],f[0,0]],[g[1,0],f[1,0]],[g[2,0],f[2,0]],color='red')
ax.plot3D([g[0,0],h[0,0]],[g[1,0],h[1,0]],[g[2,0],h[2,0]],color='red')
def animcajax(t):
n=0
while n<t:
ax.cla()
setaxis()
r=rotax(n)
dibujarcaja(r=r)
n=n+1
sistemafijo()
dibujar()
def animcajay(t):
n=0
while n<t:
ax.cla()
setaxis()
r=rotay(n)
dibujarcaja(r=r)
n=n+1
sistemafijo()
dibujar()
def animcajaz(t):
n=0
while n<t:
ax.cla()
setaxis()
r=rotaz(n)
dibujarcaja(r=r)
n=n+1
sistemafijo()
dibujar()
def animcajaxyz(t1,t2,t3,t4):
n=0
while n<t1:
ax.cla()
setaxis()
r=rotaz(n)
dibujarcaja(r=r)
n=n+1
sistemafijo()
dibujar()
Rc=r
n=0
while n<t2:
ax.cla()
setaxis()
r=rotax(n)
r=np.dot(r,Rc)
dibujarcaja(r=r)
n=n+1
sistemafijo()
dibujar()
Rc=r
n=0
while n<t3:
ax.cla()
setaxis()
r=rotay(n)
r=np.dot(Rc,r)
dibujarcaja(r=r)
n=n+1
sistemafijo()
dibujar()
Rc=r
n=0
while n<t4:
ax.cla()
setaxis()
r=rotax(n)
r=np.dot(r,Rc)
dibujarcaja(r=r)
n=n+1
sistemafijo()
dibujar()
# Ryft4 Rzft2 Rxft1 I Rxmt3 Rzmt5
def animcajaxyz2(t1,t2,t3,t4,t5):
n1=0
n2=0
n3=0
n4=0
n5=0
while n1<t1:
ax.cla()
setaxis()
r=mmatrix(rotay(n4),rotaz(n2),rotax(n1),rotax(n3),rotaz(n5))
dibujarcaja(r=r)
n1=n1+1
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n2<t2:
ax.cla()
setaxis()
r=mmatrix(rotay(n4),rotaz(n2),rotax(n1),rotax(n3),rotaz(n5))
dibujarcaja(r=r)
n2=n2+1
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n3<t3:
ax.cla()
setaxis()
r=mmatrix(rotay(n4),rotaz(n2),rotax(n1),rotax(n3),rotaz(n5))
dibujarcaja(r=r)
n3=n3+1
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n4<t4:
ax.cla()
setaxis()
r=mmatrix(rotay(n4),rotaz(n2),rotax(n1),rotax(n3),rotaz(n5))
dibujarcaja(r=r)
n4=n4+1
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n5<t5:
ax.cla()
setaxis()
r=mmatrix(rotay(n4),rotaz(n2),rotax(n1),rotax(n3),rotaz(n5))
dibujarcaja(r=r)
n5=n5+1
sistemafijo()
sistemamovil(r)
dibujar()
def animcajaxyzt(Dx,t1,t2):
n=0
while n<Dx+0.01:
ax.cla()
setaxis(4)
r=trasx(n)
print(r)
dibujarcaja(r=r)
n=n+0.2
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n<t1+0.01:
ax.cla()
setaxis(4)
r=rotaz(n)
r=np.dot(Rc,r)
dibujarcaja(r=r)
n=n+5
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n<t2+0.01:
ax.cla()
setaxis(4)
r=rotaxf(n,Rc)
dibujarcaja(r=r)
n=n+5
sistemafijo()
sistemamovil(r)
dibujar()
def animcajaxyzt2(Dx,Dy,t1,t2):
n=0
while n<Dx+0.01:
ax.cla()
setaxis(4)
r=trasx(n)
a=minv(r)
a1=np.linalg.inv(r)
print('incio')
print('r')
print(np.round(r,3))
print('a')
print(np.round(a,3))
print('a1')
print(np.round(a1,3))
print('fin')
dibujarcaja(r=r)
n=n+0.2
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n<Dy+0.01:
ax.cla()
setaxis(4)
r=trasy(n)
r=np.dot(Rc,r)
a=minv(r)
a1=np.linalg.inv(r)
print('incio')
print('r')
print(np.round(r,3))
print('a')
print(np.round(a,3))
print('a1')
print(np.round(a1,3))
print('fin')
dibujarcaja(r=r)
n=n+0.2
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n<t1+0.01:
ax.cla()
setaxis(4)
r=rotaz(n)
r=np.dot(Rc,r)
a=minv(r)
a1=np.linalg.inv(r)
print('incio')
print('r')
print(np.round(r,3))
print('a')
print(np.round(a,3))
print('a1')
print(np.round(a1,3))
print('fin')
dibujarcaja(r=r)
n=n+5
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n<t2+0.01:
ax.cla()
setaxis(4)
r=rotaxf(n,Rc)
a=minv(r)
a1=np.linalg.inv(r)
print('incio')
print('r')
print(np.round(r,3))
print('a')
print(np.round(a,3))
print('a1')
print(np.round(a1,3))
print('fin')
dibujarcaja(r=r)
n=n+5
sistemafijo()
sistemamovil(r)
dibujar()
def animejeresaotro():
n=0
while n<3+0.01:
ax.cla()
setaxis(10)
tab=trasx(n)
n=n+0.2
sistemafijo()
sistemamovil(tab)
dibujar()
Rtab=tab
n=0
while n<5+0.01:
ax.cla()
setaxis(10)
tab=trasy(n)
tab=np.dot(Rtab,tab)
n=n+0.2
sistemafijo()
sistemamovil(tab)
dibujar()
Rtab=tab
n=0
while n<45+0.01:
ax.cla()
setaxis(10)
tab=rotax(n)
tab=np.dot(Rtab,tab)
n=n+5
sistemafijo()
sistemamovil(tab)
dibujar()
n=0
while n>-5-0.01:
ax.cla()
setaxis(10)
tac=trasx(n)
n=n-0.2
sistemafijo()
sistemamovil(tac)
sistemamovil(tab)
dibujar()
Rtac=tac
n=0
while n>-4-0.01:
ax.cla()
setaxis(10)
tac=trasy(n)
tac=np.dot(Rtac,tac)
n=n-0.2
sistemafijo()
sistemamovil(tac)
sistemamovil(tab)
dibujar()
tba=minv(tab)
tbc=np.dot(tba,tac)
n=0
while n>-6-0.01:
ax.cla()
setaxis(10)
#ntbc=rotazf(n,tbc)
ntbc=np.dot(trasy(n),tbc)
tac=np.dot(tab,ntbc)
n=n-0.2
sistemafijo()
sistemamovil(tac)
sistemamovil(tab)
dibujar()
def animppp(d1,d2,d3):
n1=0
n2=0
n3=0
while n1<d1+0.01:
ax.cla()
setaxis(10)
ppp(n1,n2,n3)
n1=n1+0.2
dibujar()
while n2<d2+0.01:
ax.cla()
setaxis(10)
ppp(n1,n2,n3)
n2=n2+0.2
dibujar()
while n3<d3+0.01:
ax.cla()
setaxis(10)
ppp(n1,n2,n3)
n3=n3+0.2
dibujar()
def animrpp(t1,d2,d3):
n1=0
n2=2
n3=1
while n1<t1+0.01:
ax.cla()
setaxis(5)
rpp(n1,n2,n3)
n1=n1+5
dibujar()
while n2<d2+0.01:
ax.cla()
setaxis(5)
rpp(n1,n2,n3)
n2=n2+0.2
dibujar()
while n3<d3+0.01:
ax.cla()
setaxis(5)
rpp(n1,n2,n3)
n3=n3+0.2
dibujar()
def animrrp(t1,t2,d3):
n1=0
n2=0
n3=1
while n1<t1+0.01:
ax.cla()
setaxis(5)
rrp(n1,n2,n3)
n1=n1+5
dibujar()
while n2<t2+0.01:
ax.cla()
setaxis(5)
rrp(n1,n2,n3)
n2=n2+5
dibujar()
while n3<d3+0.01:
ax.cla()
setaxis(5)
rrp(n1,n2,n3)
n3=n3+0.2
dibujar()
def animrrr(t1,t2,t3):
n1=0
n2=0
n3=0
while n1<t1+0.01:
ax.cla()
setaxis(5)
rrr(n1,n2,n3)
n1=n1+5
dibujar()
while n2<t2+0.01:
ax.cla()
setaxis(5)
rrr(n1,n2,n3)
n2=n2+5
dibujar()
while n3<t3+0.01:
ax.cla()
setaxis(5)
rrr(n1,n2,n3)
n3=n3+5
dibujar()
def animscara(t1,t2,d3,t4):
n1=0
n2=0
n3=1
n4=0
while n1<t1+0.01:
ax.cla()
setaxis(5)
scara(n1,n2,n3,n4)
n1=n1+5
dibujar()
while n2<t2+0.01:
ax.cla()
setaxis(5)
scara(n1,n2,n3,n4)
n2=n2+5
dibujar()
while n3<d3+0.01:
ax.cla()
setaxis(5)
scara(n1,n2,n3,n4)
n3=n3+0.2
dibujar()
while n4<t4+0.01:
ax.cla()
setaxis(5)
scara(n1,n2,n3,n4)
n4=n4+5
dibujar()
def animcobras800(t1,t2,d3,t4):
n1=0
n2=0
n3=1
n4=0
while n1<t1+0.01:
ax.cla()
setaxis(1000)
cobras800(n1,n2,n3,n4)
n1=n1+5
dibujar()
while n2<t2+0.01:
ax.cla()
setaxis(1000)
cobras800(n1,n2,n3,n4)
n2=n2+5
dibujar()
while n3<d3+0.01:
ax.cla()
setaxis(1000)
cobras800(n1,n2,n3,n4)
n3=n3+5
dibujar()
while n4<t4+0.01:
ax.cla()
setaxis(1000)
cobras800(n1,n2,n3,n4)
n4=n4+5
dibujar()
def animur5(t1,t2,t3,t4,t5,t6):
n1=0
n2=0
n3=0
n4=0
n5=0
n6=0
while n1<t1+0.01:
ax.cla()
setaxis(1000)
ur5(n1,n2,n3,n4,n5,n6)
n1=n1+5
dibujar()
while n2<t2+0.01:
ax.cla()
setaxis(1000)
ur5(n1,n2,n3,n4,n5,n6)
n2=n2+5
dibujar()
while n3<t3+0.01:
ax.cla()
setaxis(1000)
ur5(n1,n2,n3,n4,n5,n6)
n3=n3+5
dibujar()
while n4<t4+0.01:
ax.cla()
setaxis(1000)
ur5(n1,n2,n3,n4,n5,n6)
n4=n4+5
dibujar()
while n5<t5+0.01:
ax.cla()
setaxis(1000)
ur5(n1,n2,n3,n4,n5,n6)
n5=n5+5
dibujar()
while n6<t6+0.01:
ax.cla()
setaxis(1000)
ur5(n1,n2,n3,n4,n5,n6)
n6=n6+5
dibujar()
def animmotoman(tb,t1a,t2a,t3a,t4a,t5a,t6a,t7a,t1b,t2b,t3b,t4b,t5b,t6b,t7b):
nb=0
n1a=0
n2a=0
n3a=0
n4a=0
n5a=0
n6a=0
n7a=0
n1b=0
n2b=0
n3b=0
n4b=0
n5b=0
n6b=0
n7b=0
while n1a<t1a+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n1a=n1a+5
dibujar()
while n2a<t2a+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n2a=n2a+5
dibujar()
while n3a<t3a+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n3a=n3a+5
dibujar()
while n4a<t4a+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n4a=n4a+5
dibujar()
while n5a<t5a+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n5a=n5a+5
dibujar()
while n6a<t6a+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n6a=n6a+5
dibujar()
while n7a<t7a+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n7a=n7a+5
dibujar()
while n1b<t1b+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n1b=n1b+5
dibujar()
while n2b<t2b+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n2b=n2b+5
dibujar()
while n3b<t3b+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n3b=n3b+5
dibujar()
while n4b<t4b+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n4b=n4b+5
dibujar()
while n5b<t5b+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n5b=n5b+5
dibujar()
while n6b<t6b+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n6b=n6b+5
dibujar()
while n7b<t7b+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n7b=n7b+5
dibujar()
while nb<tb+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
nb=nb+5
dibujar()
def animIRB1600(t1,t2,t3,t4,t5,t6):
n1=0
n2=0
n3=0
n4=0
n5=0
n6=0
while n1<t1+0.01:
ax.cla()
setaxis(1500)
IRB1600(n1,n2,n3,n4,n5,n6)
n1=n1+5
dibujar()
while n2<t2+0.01:
ax.cla()
setaxis(1500)
IRB1600(n1,n2,n3,n4,n5,n6)
n2=n2+5
dibujar()
while n3<t3+0.01:
ax.cla()
setaxis(1500)
IRB1600(n1,n2,n3,n4,n5,n6)
n3=n3+5
dibujar()
while n4<t4+0.01:
ax.cla()
setaxis(1500)
IRB1600(n1,n2,n3,n4,n5,n6)
n4=n4+5
dibujar()
while n5<t5+0.01:
ax.cla()
setaxis(1500)
IRB1600(n1,n2,n3,n4,n5,n6)
n5=n5+5
dibujar()
while n6<t6+0.01:
ax.cla()
setaxis(1500)
IRB1600(n1,n2,n3,n4,n5,n6)
n6=n6+5
dibujar()
def animIRB1600newton():
n1=0
n2=0
n3=0
n4=0
vd=IRB1600v(0,0,0,0,0,0)
sem=[0,0,0,0,0,0]
while n1<200+0.01:
vdn=vd@trasx(n1)
ax.cla()
setaxis(1500)
tetas=IRB1600newton(vdn,sem)
IRB1600(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
n1=n1+20
dibujar()
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5]]
vd=vdn
while n2>-45-0.01:
vdn=vd@rotax(n2)
ax.cla()
setaxis(1500)
tetas=IRB1600newton(vdn,sem)
IRB1600(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
n2=n2-5
dibujar()
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5]]
vd=vdn
while n3<300+0.01:
vdn=vd@trasy(n3)
ax.cla()
setaxis(1500)
tetas=IRB1600newton(vdn,sem)
IRB1600(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
n3=n3+20
dibujar()
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5]]
vd=vdn
while n4<200+0.01:
vdn=vd@trasz(n4)
ax.cla()
setaxis(1500)
tetas=IRB1600newton(vdn,sem)
IRB1600(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
n4=n4+20
dibujar()
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5]]
vd=vdn
def animIRB1600newtoncirculo(r):
n1=0
vd=IRB1600v(0,0,0,0,0,0)
sem=[0,0,0,0,0,0]
while n1<2*np.pi+0.01:
x=np.cos(n1)
y=np.sin(n1)
x=x-1
vdn=vd@trasx(-x*r)
vdn=vdn@trasy(-y*r)
ax.cla()
setaxis(1500)
tetas=IRB1600newton(vdn,sem)
IRB1600(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
robot.MoveJ(tetas)
n1=n1+(np.pi/30)
dibujar()
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5]]
vd=vdn
def animIRB1600newtoncirculoh1(r):
n1=0
mh=herramienta1v(-90)
vd=IRB1600v(0,0,0,0,0,0,mh)
sem=[0,0,0,0,0,0]
while n1<2*np.pi+0.01:
x=np.cos(n1)
y=np.sin(n1)
x=x-1
vdn=vd@rotax(45)
vdn=vdn@trasx(-x*r)
vdn=vdn@trasy(-y*r)
ax.cla()
setaxis(1500)
tetas=IRB1600newton(vdn,sem,mh)
IRB1600(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
mr=IRB1600v(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
herramienta1(mr,-90)
robot.MoveJ(tetas)
n1=n1+(np.pi/30)
dibujar()
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5]]
vd=vdn
def animur5newtoncirculo(r):
n1=0
vd=ur5v(0,0,0,0,0,0)
sem=[0,0,0,0,0,0]
while n1<2*np.pi+0.01:
x=np.cos(n1)
y=np.sin(n1)
x=x-1
#vdn=vd@rotax(-90)
vdn=vd@trasy(-x*r)
vdn=vdn@trasx(y*r)
ax.cla()
setaxis(1500)
tetas=ur5newton(vdn,sem)
ur5(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
valores1=tetas.copy()
valores1[1]=valores1[1]-90
valores1[2]=valores1[2]-90
valores1[4]=valores1[4]+90
valores1[4]=valores1[4]%360
robot.MoveJ(valores1)
n1=n1+(np.pi/30)
dibujar()
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5]]
vd=vdn
def animur5newtoncirculoh1(r):
n1=0
mh=herramienta1v()
vd=ur5v(0,0,0,0,0,0,mh)
sem=[0,0,0,0,0,0]
while n1<2*np.pi+0.01:
x=np.cos(n1)
y=np.sin(n1)
x=x-1
vdn=vd@rotax(51.21)
vdn=vdn@trasy(-x*r)
vdn=vdn@trasx(y*r)
ax.cla()
setaxis(1500)
tetas=ur5newton(vdn,sem,mh)
ur5(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
mr=ur5v(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
herramienta1(mr)
valores1=tetas.copy()
valores1[1]=valores1[1]-90
valores1[2]=valores1[2]-90
valores1[4]=valores1[4]+90
valores1[4]=valores1[4]%360
robot.MoveJ(valores1)
n1=n1+(np.pi/30)
dibujar()
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5]]
vd=vdn
def animur5newtoncirculome(r):
n1=0
mb=trasx(1200)@rotaz(180)@trasy(220)
vd=ur5v(0,0,0,0,0,0)
sem=[0,0,0,0,0,0]
while n1<2*np.pi+0.01:
x=np.cos(n1)
y=np.sin(n1)
x=x-1
#vdn=vd@rotax(-90)
vdn=vd@trasy(-x*r)
vdn=vdn@trasx(y*r)
vdn2=vdn@trasz(200)@rotax(180)@rotaz(180)
ax.cla()
setaxis(1500)
tetas=ur5newton(vdn,sem)
tetas2=ur5newton(vdn2,sem,mb=mb)
ur5(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
ur5(tetas2[0],tetas2[1],tetas2[2],tetas2[3],tetas2[4],tetas2[5],mb)
valores1=tetas.copy()
valores1e=tetas2.copy()
valores1[1]=valores1[1]-90
valores1[2]=valores1[2]-90
valores1[4]=valores1[4]+90
valores1[4]=valores1[4]%360
valores1e[1]=valores1e[1]-90
valores1e[2]=valores1e[2]-90
valores1e[4]=valores1e[4]+90
valores1e[4]=valores1e[4]%360
#robot.MoveJ(valores1)
#robote.MoveJ(valores1e)
n1=n1+(np.pi/30)
dibujar()
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5]]
vd=vdn
def animur5newtoncirculoh1me(r):
n1=0
mb=trasx(1250)@rotaz(180)@trasy(220)
mh=herramienta1v()
vd=ur5v(0,0,0,0,0,0,mh)
sem=[0,0,0,0,0,0]
while n1<2*np.pi+0.01:
x=np.cos(n1)
y=np.sin(n1)
x=x-1
vdn=vd@rotax(0)
vdn=vdn@trasy(-x*r)
vdn=vdn@trasx(y*r)
vdn2=vdn@trasz(200)@rotax(180)@rotaz(180)
ax.cla()
setaxis(1500)
tetas=ur5newton(vdn,sem,mh)
tetas2=ur5newton(vdn2,sem,mb=mb)
ur5(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
ur5(tetas2[0],tetas2[1],tetas2[2],tetas2[3],tetas2[4],tetas2[5],mb)
mr=ur5v(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5])
herramienta1(mr)
valores1=tetas.copy()
valores1[1]=valores1[1]-90
valores1[2]=valores1[2]-90
valores1[4]=valores1[4]+90
valores1[4]=valores1[4]%360
#robot.MoveJ(valores1)
n1=n1+(np.pi/30)
dibujar()
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5]]
vd=vdn
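# Interactive dual-arm teleoperation: reads six analog axes (presumably three joysticks) from an
# Arduino on COM5, rescales the raw 0-1023 readings to -10..10 with a +/-1.8 dead band, and jogs
# the selected arm in joint, tool or world mode. A small matplotlib panel records waypoints and
# replays them with MoveJ / MoveL / MoveC segments.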
arduino=serial.Serial("COM5",9600,timeout=1)
time.sleep(1)
leyendo=True
mtr1,mtr2=accmotomanv(0,90,-90,-90,-130,0,-60,90,90,90,-90,-130,0,-60,90)
tetas=[0,90,-90,-90,-130,0,-60,90,90,90,-90,-130,0,-60,90]
movesel=1
puntosmove=[0,0,0,0,0,0,0,0,0]
selbrazo=1
selbrazopuntos=[0,0,0,0,0,0,0,0,0]
mtr12=mtr1.copy()
modo=1
puntos=np.zeros((10,4,4))
tetaspuntos=np.zeros((10,15))
puntonum=-1
x1omin=0
x1omax=1023
x1nmin=-10
x1nmax=10
x1or=(x1omax-x1omin)
x1nr=(x1nmax-x1nmin)
x2omin=0
x2omax=1023
x2nmin=-10
x2nmax=10
x2or=(x2omax-x2omin)
x2nr=(x2nmax-x2nmin)
x3omin=0
x3omax=1023
x3nmin=-10
x3nmax=10
x3or=(x3omax-x3omin)
x3nr=(x3nmax-x3nmin)
y1omin=0
y1omax=1023
y1nmin=-10
y1nmax=10
y1or=(y1omax-y1omin)
y1nr=(y1nmax-y1nmin)
y2omin=0
y2omax=1023
y2nmin=-10
y2nmax=10
y2or=(y2omax-y2omin)
y2nr=(y2nmax-y2nmin)
y3omin=0
y3omax=1023
y3nmin=-10
y3nmax=10
y3or=(y3omax-y3omin)
y3nr=(y3nmax-y3nmin)
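# Serial read loop (runs in a background thread): requests a sample, parses the comma-separated
# x1,y1,x2,y2,x3,y3 readings, rescales them, and updates either the joint values directly (modo 1)
# or the end-effector frames via inverse kinematics in tool (modo 2) or world (modo 3) coordinates.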
def leerarduino():
global selbrazo
global leyendo
global modo
global mtr1
global mtr2
global mtr12
sem=[0,90,-90,-90,-130,0,-60,90,90,90,-90,-130,0,-60,90]
global tetas
time.sleep(1)
while leyendo:
mensaje="0"
arduino.write(mensaje.encode('ascii'))
vals=arduino.readline().decode('ascii')
print(vals)
pos1=vals.index(",")
x1=vals[0:pos1]
pos2=vals.index(",",(pos1+1))
y1=vals[pos1+1:pos2]
pos3=vals.index(",",(pos2+1))
x2=vals[pos2+1:pos3]
pos4=vals.index(",",(pos3+1))
y2=vals[pos3+1:pos4]
pos5=vals.index(",",(pos4+1))
x3=vals[pos4+1:pos5]
y3=vals[pos5+1:]
x1=int(x1)
x2=int(x2)
x3=int(x3)
y1=int(y1)
y2=int(y2)
y3=int(y3)
x1n=(((x1-x1omin)*x1nr)/x1or)+x1nmin
x2n=(((x2-x2omin)*x2nr)/x2or)+x2nmin
x3n=(((x3-x3omin)*x3nr)/x3or)+x3nmin
y1n=(((y1-y1omin)*y1nr)/y1or)+y1nmin
y2n=(((y2-y2omin)*y2nr)/y2or)+y2nmin
y3n=(((y3-y3omin)*y3nr)/y3or)+y3nmin
if (abs(x1n)<1.8):
x1n=0
if (abs(x2n)<1.8):
x2n=0
if (abs(x3n)<1.8):
x3n=0
if (abs(y1n)<1.8):
y1n=0
if (abs(y2n)<1.8):
y2n=0
if (abs(y3n)<1.8):
y3n=0
print(x1n)
print(x2n)
print(x3n)
print(y1n)
print(y2n)
print(y3n)
if modo==1:
if selbrazo==1:
tetas[1]=tetas[1]+x1n
tetas[2]=tetas[2]+y1n
tetas[3]=tetas[3]+x2n
tetas[4]=tetas[4]+y2n
tetas[5]=tetas[5]+x3n
tetas[6]=tetas[6]+y3n
tetas[7]=tetas[7]
ax.cla()
setaxis(1000)
accmotoman(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
mtr1,mtr2=accmotomanv(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14]]
dibujar()
elif selbrazo==2:
tetas[8]=tetas[8]+x1n
tetas[9]=tetas[9]+y1n
tetas[10]=tetas[10]+x2n
tetas[11]=tetas[11]+y2n
tetas[12]=tetas[12]+x3n
tetas[13]=tetas[13]+y3n
tetas[14]=tetas[14]
ax.cla()
setaxis(1000)
accmotoman(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
mtr1,mtr2=accmotomanv(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14]]
dibujar()
elif selbrazo==3:
tetas[1]=tetas[1]+x1n
tetas[2]=tetas[2]+y1n
tetas[3]=tetas[3]+x2n
tetas[4]=tetas[4]+y2n
tetas[5]=tetas[5]+x3n
tetas[6]=tetas[6]+y3n
mtr1,mtr2=accmotomanv(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
mtr2=mtr1@mtr12
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14]]
tetas=motomannewton2(mtr1,mtr2,sem)
ax.cla()
setaxis(1000)
accmotoman(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
mtr1,mtr2=accmotomanv(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14]]
dibujar()
pass
elif modo==2:
if selbrazo==1:
ax.cla()
setaxis(1000)
mtr1=mtr1@trasx(x1n*5)
mtr1=mtr1@rotax(y1n)
mtr1=mtr1@trasy(x2n*5)
mtr1=mtr1@rotay(y2n)
mtr1=mtr1@trasz(x3n*5)
mtr1=mtr1@rotaz(y3n)
tetas=motomannewton2(mtr1,mtr2,sem)
accmotoman(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14]]
dibujar()
elif selbrazo==2:
ax.cla()
setaxis(1000)
mtr2=mtr2@trasx(x1n*5)
mtr2=mtr2@rotax(y1n)
mtr2=mtr2@trasy(x2n*5)
mtr2=mtr2@rotay(y2n)
mtr2=mtr2@trasz(x3n*5)
mtr2=mtr2@rotaz(y3n)
tetas=motomannewton2(mtr1,mtr2,sem)
accmotoman(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14]]
dibujar()
elif selbrazo==3:
ax.cla()
setaxis(1000)
mtr1=mtr1@trasx(x1n*5)
mtr1=mtr1@rotax(y1n)
mtr1=mtr1@trasy(x2n*5)
mtr1=mtr1@rotay(y2n)
mtr1=mtr1@trasz(x3n*5)
mtr1=mtr1@rotaz(y3n)
mtr2=mtr1@mtr12
tetas=motomannewton2(mtr1,mtr2,sem)
accmotoman(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14]]
dibujar()
elif modo==3:
if selbrazo==1:
ax.cla()
setaxis(1000)
mtr1=trasx(x1n*5)@mtr1
mtr1=rotaxf(y1n,mtr1)
mtr1=trasy(x2n*5)@mtr1
mtr1=rotayf(y2n,mtr1)
mtr1=trasz(x3n*5)@mtr1
mtr1=rotazf(y3n,mtr1)
tetas=motomannewton2(mtr1,mtr2,sem)
accmotoman(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14]]
dibujar()
elif selbrazo==2:
ax.cla()
setaxis(1000)
mtr2=trasx(x1n*5)@mtr2
mtr2=rotaxf(y1n,mtr2)
mtr2=trasy(x2n*5)@mtr2
mtr2=rotayf(y2n,mtr2)
mtr2=trasz(x3n*5)@mtr2
mtr2=rotazf(y3n,mtr2)
tetas=motomannewton2(mtr1,mtr2,sem)
accmotoman(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14]]
dibujar()
elif selbrazo==3:
ax.cla()
setaxis(1000)
mtr1=trasx(x1n*5)@mtr1
mtr1=rotaxf(y1n,mtr1)
mtr1=trasy(x2n*5)@mtr1
mtr1=rotayf(y2n,mtr1)
mtr1=trasz(x3n*5)@mtr1
mtr1=rotazf(y3n,mtr1)
mtr2=mtr1@mtr12
tetas=motomannewton2(mtr1,mtr2,sem)
accmotoman(tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14])
sem=[tetas[0],tetas[1],tetas[2],tetas[3],tetas[4],tetas[5],tetas[6],tetas[7],tetas[8],tetas[9],tetas[10],tetas[11],tetas[12],tetas[13],tetas[14]]
dibujar()
print("finalizando hilo")
axbtnvalm=plt.axes([0.42,0.06,0.15,0.06])
axbtnmove=plt.axes([0.6,0.06,0.15,0.06])
axbtnmode=plt.axes([0.42,0.13,0.15,0.06])
axbtn1=plt.axes([0.85,0.01,0.12,0.08])
axtxtpuntos = plt.axes([0.02, 0.2, 0.34, 0.75])
axbtnsecuencia=plt.axes([0.04,0.13,0.22,0.06])
axbtnreset=plt.axes([0.04,0.06,0.11,0.06])
axrbtnselbrazo=plt.axes([0.27,0.02,0.14,0.175])
btnmodo=Button(axbtnmode,'modo: Joint')
btnvalm=Button(axbtnvalm,'Grabar')
btnmove=Button(axbtnmove,'Move J')
button1=Button(axbtn1,'cerrar')
btnsecuencia=Button(axbtnsecuencia,'Realizar secuencia')
btnreset=Button(axbtnreset,'Reset')
txtpuntos = TextBox(axtxtpuntos, '',initial='Empieza a grabar puntos')
radioselbrazo = RadioButtons(axrbtnselbrazo, ('Brazo 1', 'Brazo 2', '2 Brazos'))
ax.cla()
setaxis(1000)
accmotoman(0,90,-90,-90,-130,0,-60,90,90,90,-90,-130,0,-60,90)
dibujar()
def grabar(event):
global puntos
global puntonum
global tetas
global tetaspuntos
global movesel
global puntosmove
if puntonum<9:
puntonum+=1
tetaspuntos[puntonum,:]=tetas
print(puntos)
print(tetaspuntos)
n=0
texto=""
if puntonum>0:
puntosmove[puntonum-1]=movesel
while n<puntonum:
if puntosmove[n]==1:
texto=texto+"p"+str(n)+" p"+str(n+1)+" MoveJ\n"
elif puntosmove[n]==2:
texto=texto+"p"+str(n)+" p"+str(n+1)+" MoveL\n"
else:
texto=texto+"p"+str(n)+" p"+str(n+1)+" MoveC\n"
n=n+1
txtpuntos.set_val(texto)
else:
print("No hay mas espacios para grabar")
def cerrar(event):
global leyendo
leyendo=False
plt.close()
hilo1.join()
arduino.close()
sys.exit()
def mode(event):
global modo
if modo==1:
modo=2
btnmodo.label.set_text("modo: Tool")
elif modo==2:
modo=3
btnmodo.label.set_text("modo: World")
elif modo==3:
modo=1
btnmodo.label.set_text("modo: Joint")
def move(event):
global movesel
if movesel==1:
movesel=2
btnmove.label.set_text("Move L")
elif movesel==2:
movesel=3
btnmove.label.set_text("Move C")
elif movesel==3:
movesel=1
btnmove.label.set_text("Move J")
def realizarsecuencia(event):
global puntos
global puntonum
global tetas
global tetaspuntos
global puntosmove
p1=np.zeros((4,4))
p2=np.zeros((4,4))
tetas1=[0,0,0,0,0,0]
tetas2=[0,0,0,0,0,0]
n=0
if puntonum>0:
tetaanterior=tetas1=tetaspuntos[n,:]
while n<puntonum:
if puntosmove[n]==1:
tetas1=tetaspuntos[n,:]
tetas2=tetaspuntos[n+1,:]
tetaanterior=motomanmovej(tetaanterior,tetas2)
elif puntosmove[n]==2:
tetas1=tetaspuntos[n,:]
tetas2=tetaspuntos[n+1,:]
tetaanterior=motomanmovel(tetaanterior,tetas2)
else:
tetas1=tetaspuntos[n,:]
p1=puntos[n,:,:]
p2=puntos[n+1,:,:]
ur5movec(p1,p2,tetas1)
n=n+1
tetas=tetaanterior
else:
print("No hay suficientes puntos grabados")
def resetboton(event):
global puntonum
puntonum=-1
txtpuntos.set_val("")
def brazoseleccionado(label):
global selbrazo
global mtr1
global mtr2
global mtr12
if label=='Brazo 1':
selbrazo=1
elif label=='Brazo 2':
selbrazo=2
elif label=='2 Brazos':
selbrazo=3
mtr12=minv(mtr1)@mtr2
btnmodo.on_clicked(mode)
button1.on_clicked(cerrar)
btnvalm.on_clicked(grabar)
btnmove.on_clicked(move)
btnreset.on_clicked(resetboton)
btnsecuencia.on_clicked(realizarsecuencia)
radioselbrazo.on_clicked(brazoseleccionado)
hilo1=threading.Thread(target=leerarduino,daemon=True)
hilo1.start()
#Motoman CSDA10F
|
test_events.py
|
#!/usr/bin/env python
import time
import hookio
import threading
import itertools
import functools
import json
import logging
from six.moves import queue
log = logging.getLogger(__name__)
get_error_model = {
"error": True,
"message": """\
"anonymous" does not have the role "events::read" which is required to access "/marak/events"
If you are the owner of this resource try logging in at https://hook.io/login
If any access keys have been created you can also provide \
a `hook_private_key` parameter to access the service.\
""",
"user": "anonymous",
"role": "events::read",
"type": "unauthorized-role-access"
}
create_error_model = {
"error": True,
"message": """\
"anonymous" does not have the role "hook::create" which is required to access "/new"
If you are the owner of this resource try logging in at https://hook.io/login
If any access keys have been created you can also provide \
a `hook_private_key` parameter to access the service.\
""",
"user": "anonymous",
"role": "hook::create",
"type": "unauthorized-role-access"
}
def test_events(sdk):
res = sdk.events.get('marak')
assert type(res) == list
prev_hit = max(row['time'] for row in res)
# counters = itertools.groupby(sorted(res, key=lambda row:row['type']), lambda row:row['type'])
# prev_counters = dict((k,len(list(v))) for k,v in counters)
time.sleep(1) # wait for time change
res = sdk.events.get('marak', anonymous=True)
assert res == get_error_model
res = sdk.hook.source('marak/echo')
# source = res
assert res.startswith("module['exports']")
time.sleep(15) # Wait for events processing on server side
res = sdk.events.get('marak')
assert type(res) == list
# assert max(row['time'] for row in res) > prev_hit
new_list = [row['type'] for row in res if row['time'] > prev_hit]
# all_list = [row['type'] for row in res]
# counters = itertools.groupby(sorted(res, key=lambda row:row['type']), lambda row:row['type'])
# counters = dict((k,(prev_counters.get(k, 0), len(list(v)))) for k,v in counters)
assert new_list
# check = 'events::read', 'hook::create', 'hook::source::read'
# assert any(e in new_list for e in check)
def stream_process(q, e):
def _stream_process(item):
q.put(item)
if e.wait(0.1):
raise SystemExit
return _stream_process
def noop(arg):
return arg
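# Shared driver for the streaming tests: runs the hook.io events stream for 'marak' in a daemon
# thread, funnels every received line through a queue, checks that each parsed event carries a
# 'time' field, and finally signals the thread to stop via the threading.Event.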
def async_events_stream_template(name, func_factory, line2obj):
sdk = hookio.createClient({'max_retries': 3})
assert sdk.hook_private_key
q = queue.Queue()
e = threading.Event()
func = functools.partial(sdk.events.stream, 'marak')
thread_func = func_factory(func, stream_process(q, e))
t = threading.Thread(name=name, target=thread_func)
t.daemon = True
t.start()
for i in itertools.count():
assert i < 200
try:
line = q.get(timeout=1)
except queue.Empty:
break
assert line
obj = line2obj(line)
assert 'time' in obj
res = sdk.events.get('marak', anonymous=True)
assert res == get_error_model
e.set()
try:
line = q.get(timeout=60)
except queue.Empty:
# warning
return
assert line
obj = line2obj(line)
assert 'time' in obj
def test_events_stream_raw():
def func_factory(func, streaming):
return functools.partial(func, streaming=streaming, raw=True)
async_events_stream_template("test_events_stream_raw", func_factory, json.loads)
def test_events_stream_obj():
def func_factory(func, streaming):
return functools.partial(func, streaming=streaming, raw=False)
async_events_stream_template("test_events_stream_obj", func_factory, noop)
def stream_iter_thread(streaming, func):
gen = func()
try:
for row in gen:
log.debug('stream_iter_thread: %r', row)
streaming(row)
except SystemExit:
gen.close()
raise
def test_events_stream_iter_raw():
def func_factory(func, streaming):
def iter_factory():
resp = func(streaming=True, raw=True)
for line in resp.iter_lines(chunk_size=1):
if not isinstance(line, str):
line = line.decode(resp.encoding or 'utf-8', errors='replace')
yield line
return functools.partial(stream_iter_thread, streaming, iter_factory)
async_events_stream_template("test_events_stream_iter_raw", func_factory, json.loads)
def test_events_stream_iter():
def func_factory(func, streaming):
def iter_factory():
res = func(streaming=True, raw=False)
assert isinstance(res, hookio.utils.Response2JSONLinesIterator)
iterobj = iter(res)
try:
for row in iterobj:
yield row
finally:
res.response.close()
iterobj.close()
return functools.partial(stream_iter_thread, streaming, iter_factory)
async_events_stream_template("test_events_stream_iter", func_factory, noop)
|
DotaServiceClient.py
|
from collections import Counter
from time import time
import asyncio
import math
import os
import random
import shutil
import torch.multiprocessing as mp
from grpclib.client import Channel
from google.protobuf.json_format import MessageToDict
from tensorboardX import SummaryWriter
from pympler.tracker import SummaryTracker
from dotaservice.protos.DotaService_grpc import DotaServiceStub
from dotaservice.protos.dota_gcmessages_common_bot_script_pb2 import CMsgBotWorldState
from dotaservice.protos.DotaService_pb2 import Action
from dotaservice.protos.DotaService_pb2 import Empty
from dotaservice.protos.DotaService_pb2 import Config
from dotaservice.protos.DotaService_pb2 import HostMode
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class ActorNCritic(torch.nn.Module):
def __init__(self, num_inputs, action_space, num_hidden):
super(ActorNCritic, self).__init__()
self.num_hidden = num_hidden
self.num_inputs = num_inputs
self.action_space = action_space
self.a_lstm_layer = nn.LSTM(num_inputs, self.num_hidden)
self.c_lstm_layer = nn.LSTM(num_inputs, self.num_hidden)
self.critic_linear = nn.Linear(self.num_hidden, 1)
self.actor_linear = nn.Linear(self.num_hidden, self.action_space)
self.train()
def forward(self, inputs):
t = torch.FloatTensor(inputs)
t = t.view(len(inputs),1,-1)
a_lstm_outs,_ = self.a_lstm_layer(t)
a_lstm_out = a_lstm_outs[-1]
a_lstm_out = F.tanh(a_lstm_out)
actor_out = self.actor_linear(a_lstm_out)
c_lstm_outs,_ = self.c_lstm_layer(t)
c_lstm_out = c_lstm_outs[-1]
c_lstm_out = F.tanh(c_lstm_out)
critic_out = self.critic_linear(c_lstm_out)
return actor_out, critic_out
def clone(self):
m = ActorNCritic(self.num_inputs, self.action_space, self.num_hidden)
m.load_state_dict(self.state_dict())
return m
lr = 1e-3
num_hidden = 16
MODE_NORMAL=0
MODE_AUTO=1
config = Config(
ticks_per_observation=10,
host_timescale=5,
host_mode=HostMode.Value('DEDICATED'),
)
def get_hero_unit(state, id=0):
for unit in state.units:
if unit.unit_type == CMsgBotWorldState.UnitType.Value('HERO') and unit.player_id == id:
return unit
raise ValueError("hero {} not found in state:\n{}".format(id, state))
def get_moving_vec(idx):
if (idx == 0):
return (0,0)
rad = idx * math.pi / 4 - math.pi
x = math.cos(rad)
y = math.sin(rad)
return x,y
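# Worked examples of the mapping above (illustration only): index 0 means
# "do not move"; indices 1-8 are spread every 45 degrees around the hero, e.g.
#   get_moving_vec(4) -> rad = 0     -> ( 1.0,  0.0)
#   get_moving_vec(2) -> rad = -pi/2 -> ( 0.0, -1.0)
#   get_moving_vec(8) -> rad = pi    -> (-1.0,  0.0)   (up to float rounding)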
xp_to_reach_level = {
1: 0,
2: 230,
3: 600,
4: 1080,
5: 1680,
6: 2300,
7: 2940,
8: 3600,
9: 4280,
10: 5080,
11: 5900,
12: 6740,
13: 7640,
14: 8865,
15: 10115,
16: 11390,
17: 12690,
18: 14015,
19: 15415,
20: 16905,
21: 18405,
22: 20155,
23: 22155,
24: 24405,
25: 26905
}
action_none = CMsgBotWorldState.Action()
action_none.actionType = CMsgBotWorldState.Action.Type.Value(
'DOTA_UNIT_ORDER_NONE')
def get_total_xp(level, xp_needed_to_level):
if level == 25:
return xp_to_reach_level[level]
xp_required_for_next_level = xp_to_reach_level[level + 1] - xp_to_reach_level[level]
missing_xp_for_next_level = (xp_required_for_next_level - xp_needed_to_level)
return xp_to_reach_level[level] + missing_xp_for_next_level
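# Worked example of the formula above (illustration only): a level-2 hero with
# xp_needed_to_level=100 still needs 100 XP to reach level 3, so
#   xp_required_for_next_level = 600 - 230 = 370
#   missing_xp_for_next_level  = 370 - 100 = 270
#   get_total_xp(2, 100)       = 230 + 270 = 500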
def get_reward(prev_state, state):
"""Get the reward."""
unit_init = get_hero_unit(prev_state)
unit = get_hero_unit(state)
reward = {'xp': 0, 'hp': 0, 'death': 0, 'dist': 0, 'lh': 0}
xp_init = get_total_xp(level=unit_init.level, xp_needed_to_level=unit_init.xp_needed_to_level)
xp = get_total_xp(level=unit.level, xp_needed_to_level=unit.xp_needed_to_level)
reward['xp'] = (xp - xp_init) * 0.02 # One creep will result in 0.114 reward
if unit_init.is_alive and unit.is_alive:
hp_init = unit_init.health / unit_init.health_max
hp = unit.health / unit.health_max
reward['hp'] = (hp - hp_init) * 1.0
if unit_init.is_alive and not unit.is_alive:
reward['death'] = - 0.5 # Death should be a big penalty
# Last-hit reward
lh = unit.last_hits - unit_init.last_hits
reward['lh'] = lh * 0.5
# Help him get to mid, for minor speed boost
dt = state.dota_time - prev_state.dota_time
dist_mid_init = math.sqrt(unit_init.location.x**2 + unit_init.location.y**2)
dist_mid = math.sqrt(unit.location.x**2 + unit.location.y**2)
reward_dist = (dist_mid_init - dist_mid) /\
((unit_init.base_movement_speed + unit.base_movement_speed) * dt) * 0.1
#print(dt, reward['dist'])
total_reward = sum(reward.values())
return total_reward, reward_dist
def calc_reward(state, prev_state):
return get_reward(prev_state, state)
config = Config(
ticks_per_observation=10,
host_timescale=10,
host_mode=HostMode.Value('DEDICATED'),
)
class DotaServiceEnv(object):
def __init__(self, rank, config, host, port):
loop = asyncio.get_event_loop()
channel = Channel(host, port, loop=loop)
self.env = DotaServiceStub(channel)  # placeholder
self.config = config
self.gamma = 0.7
self.gamma_dist = 0.
self.tau = 1.0
self.entropy_coef = 0.01
self.epsilon = 0.2
self.buffer_size = 1000
self.out_classes = 9
self.update_steps = 3
self.rank = rank
self.writer = SummaryWriter(comment='_%d'%self.rank)
self.a3c_model = ActorNCritic(8, self.out_classes, num_hidden)
self.optimizer = optim.SGD(self.a3c_model.parameters(), lr=lr)
self.MODE = MODE_NORMAL
def reset(self):
self.states = []
self.actions = []
self.entropies = []
self.values = []
self.rewards = []
self.log_probs = []
self.raw_log_probs = []
self.raw_probs = []
def set_model(self, model):
self.a3c_model = model
self.optimizer = optim.SGD(self.a3c_model.parameters(), lr=lr)
def get_model(self):
return self.a3c_model
@staticmethod
def unit2input(unit):
loc = [unit.location.x / 7000., unit.location.y / 7000.]
return loc
@staticmethod
def print_unit_list(state):
for unit in state.units:
if unit.unit_type == CMsgBotWorldState.UnitType.Value('HERO') or unit.unit_type == CMsgBotWorldState.UnitType.Value('LANE_CREEP'):
print('[debug] unit list: {}'.format(unit))
@staticmethod
def get_unit_list(state):
r = []
for unit in state.units:
if unit.unit_type == CMsgBotWorldState.UnitType.Value('LANE_CREEP')\
or unit.unit_type == CMsgBotWorldState.UnitType.Value('TOWER')\
or unit.unit_type == CMsgBotWorldState.UnitType.Value('BUILDING')\
or unit.unit_type == CMsgBotWorldState.UnitType.Value('BARRACKS')\
or unit.unit_type == CMsgBotWorldState.UnitType.Value('FORT')\
or unit.unit_type == CMsgBotWorldState.UnitType.Value('HERO'):
r.append(unit)
return r
def ppo_train_actor(self, old_model):
self.a3c_model.zero_grad()
self.optimizer.zero_grad()
l = 0.0
R = torch.zeros(1, 1)
reduced_r = []
for i in reversed(range(len(self.rewards))):
R = self.gamma * R + self.rewards[i][0]
reduced_r.append(R)
reduced_r = list(reversed(reduced_r))
for i in range(len(self.rewards)):
reduced_r[i] += self.rewards[i][1]
idxs = list(range(len(self.rewards)))
random.shuffle(idxs)
#TODO: turn `for loop` to tensor operations
for i in idxs:
new_prob, v = self.a3c_model(self.states[i])
new_prob = F.softmax(new_prob)
old_prob, _ = old_model(self.states[i])
old_prob = F.softmax(old_prob)
adv = reduced_r[i] - v.data
onehot_act = torch.zeros(self.out_classes)
onehot_act[self.actions[i]] = 1
ratio = torch.sum(new_prob * onehot_act) / torch.sum(old_prob * onehot_act)
surr = ratio * adv
l = l - min(surr, torch.clamp(ratio, 1.0 - self.epsilon, 1.0 + self.epsilon)*adv)
l = l / len(idxs)
#print("train/policy_loss", l.item())
l.backward(retain_graph=True)
self.optimizer.step()
return l.item()
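# Illustrative sketch (commented out, never invoked): the clipped PPO
# surrogate that ppo_train_actor computes one sample at a time above, written
# as a single batched tensor expression. The tensor shapes and the default
# epsilon (matching self.epsilon = 0.2) are assumptions for the illustration.
#
#     def ppo_clipped_surrogate(new_probs, old_probs, actions, advantages, epsilon=0.2):
#         """new_probs/old_probs: (N, A) distributions, actions: (N,) long indices,
#         advantages: (N, 1). Returns -E[min(r * A, clip(r, 1 - eps, 1 + eps) * A)]."""
#         idx = actions.view(-1, 1)
#         ratio = new_probs.gather(1, idx) / old_probs.gather(1, idx)
#         surr = ratio * advantages
#         clipped = torch.clamp(ratio, 1.0 - epsilon, 1.0 + epsilon) * advantages
#         return -torch.min(surr, clipped).mean()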
def ppo_train_critic(self):
self.a3c_model.zero_grad()
self.optimizer.zero_grad()
R = torch.zeros(1, 1)
l = 0.0
reduced_r = []
for i in reversed(range(len(self.rewards))):
R = self.gamma * R + self.rewards[i][0]
reduced_r.append(R)
reduced_r = list(reversed(reduced_r))
for i in range(len(self.rewards)):
reduced_r[i] += self.rewards[i][1]
idxs = list(range(len(self.rewards)))
random.shuffle(idxs)
for i in idxs:
adv = reduced_r[i] - self.a3c_model(self.states[i])[1]
l = l + adv ** 2
l = l / len(idxs)
l.backward(retain_graph=True)
#print("train/value_loss", l.item())
self.optimizer.step()
return l.item()
def teacher_train(self):
#TODO: teacher_loss * (1 - Gini coefficient)
#TODO: entropy loss
self.a3c_model.zero_grad()
self.optimizer.zero_grad()
teacher_loss = 0
labels = torch.cat(self.predefined_steps)
#balance loss
weight = torch.zeros((self.out_classes,))
for i in range(self.out_classes):
weight[i] = torch.sum(labels==i)
if weight[i] > 0:
weight[i] = 1./ weight[i]
nll = nn.NLLLoss(weight=weight)
log_probs = torch.cat(self.raw_log_probs)
teacher_loss = nll(log_probs, labels) * 0.1#0.1 as coeff
teacher_loss.backward(retain_graph=True)
self.optimizer.step()
def train(self):
#just make it simple
self.a3c_model.train()
old_model = self.a3c_model.clone()
#self.teacher_train()
l = 0
for _ in range(self.update_steps):
l = self.ppo_train_actor(old_model)
print("train/policy_loss", l)
self.writer.add_scalar("train/policy_loss", l / len(self.states))
if self.MODE == MODE_NORMAL:
for _ in range(self.update_steps):
l= self.ppo_train_critic()
print("train/value_loss", l)
self.writer.add_scalar("train/value_loss", l / len(self.states))
total_reward = np.sum(self.rewards)
print("MODE {} total reward {} avg reward {}".format(self.MODE, total_reward, total_reward / len(self.rewards)))
self.writer.add_scalar("train/reward", total_reward)
def default_action(self, state):
hero = state[0]
for unit in state:
if unit[4] > 0:
if math.hypot(hero[0]-unit[0],hero[1]-unit[1]) < 600. / 7000.:
return 1
if (hero[0] < 0 and hero[1] < 0):
return 5
else:
return 1
return 5
async def run_a_game(self):
#tracker = SummaryTracker()
#print('using model id {}'.format(id(self.a3c_model)))
self.reset()
self.MODE = np.random.randint(2)
#print('Mode:{}'.format(self.MODE))
# start a game
while True:
try:
t_start = time()
await asyncio.wait_for(self.env.clear(Empty()), timeout=60)
state = await asyncio.wait_for(self.env.reset(config), timeout=60)
#print('start time {}'.format(time() - t_start))
break
except Exception as e:
print('Exception on env.reset: {}'.format(e))
return
while True:
# fetch hero
#tick_start = time()
state = state.world_state
if state.dota_time > 130:
break
prev_state = state
# print(state.dota_time)
hero = get_hero_unit(state)
all_units = DotaServiceEnv.get_unit_list(state)
input_state = []
hero_loc = (hero.location.x, hero.location.y)
for unit in all_units:
loc = [unit.location.x, unit.location.y]
d = math.sqrt((unit.location.x - hero_loc[0])**2 + (unit.location.y - hero_loc[1])**2)
if d >= 1200:
continue
if unit is not hero:
loc = [hero_loc[0] - unit.location.x, hero_loc[1] - unit.location.y]
loc = [loc[0] / 7000., loc[1] / 7000.]
unit_state = list(loc)
type_tup = [0] * 6
if unit.unit_type == CMsgBotWorldState.UnitType.Value('HERO') and unit.player_id == 0:
type_tup[0] = 1
elif unit.unit_type == CMsgBotWorldState.UnitType.Value('LANE_CREEP') and unit.team_id == hero.team_id:
type_tup[1] = 1
elif unit.unit_type == CMsgBotWorldState.UnitType.Value('LANE_CREEP') and unit.team_id != hero.team_id:
type_tup[2] = 1
elif unit.unit_type == CMsgBotWorldState.UnitType.Value('TOWER') and unit.team_id == hero.team_id:
type_tup[3] = 1
elif unit.unit_type == CMsgBotWorldState.UnitType.Value('TOWER') and unit.team_id != hero.team_id:
type_tup[4] = 1
else:
type_tup[5] = 1
unit_state.extend(type_tup)
unit_state = np.array(unit_state)
unit_state = torch.from_numpy(unit_state).float()
if unit is hero:
hero_state = unit_state
else:
input_state.append(unit_state)
input_state_wo_hero = sorted(input_state, key=lambda x:math.hypot(x[0],x[1]))
input_state = [hero_state]
input_state.extend(input_state_wo_hero)
#print(input_state)
raw_input_state = input_state
input_state = torch.stack(input_state)
self.states.append(input_state)
action_out, value_out = self.a3c_model(input_state)
#print(action_out , value_out, input_state)
prob = F.softmax(action_out)
self.raw_probs.append(prob)
log_prob = F.log_softmax(action_out)
self.raw_log_probs.append(log_prob)
self.values.append(value_out)
self.log_probs.append(log_prob)
entropy = - (log_prob * prob).sum(1, keepdim=True)
self.entropies.append(entropy)
if self.MODE == MODE_NORMAL:
action = prob.multinomial(num_samples=1).data
#action = torch.argmax(log_prob, 1).data.view(-1,1)
elif self.MODE == MODE_AUTO:
action = self.default_action(raw_input_state)
self.actions.append(action)
action_pb = CMsgBotWorldState.Action()
action_pb.actionType = CMsgBotWorldState.Action.Type.Value(
'DOTA_UNIT_ORDER_MOVE_TO_POSITION')
mx, my = get_moving_vec(action)
scale = 500
hloc = hero.location
m = CMsgBotWorldState.Action.MoveToLocation()
m.location.x = mx * scale + hloc.x
m.location.y = my * scale + hloc.y
m.location.z = 0
action_pb.moveToLocation.CopyFrom(m)
action_pb.actionDelay=0
# print(action, action_pb)
# print('tick cost {}'.format(time() - tick_start))
try:
state = await asyncio.wait_for(self.env.step(Action(action=action_pb)), timeout=11)
reward = calc_reward(state.world_state, prev_state)
self.rewards.append(reward)
except Exception as e:
print('Exception on env.step: {}'.format(repr(e)))
raise
self.train()
#await asyncio.get_event_loop().run_in_executor(None, self.train)
#tracker.print_diff()
def main():
tmp_dir = '/root/Dota2BotStepByStep/runs'
shutil.rmtree(tmp_dir)
host = '172.18.5.31'
base_port = 13337
concurrent_num = 12
eps = [
{'host':host, 'port':i} for i in range(base_port, base_port + concurrent_num)
]
'''
host = '172.18.5.30'
eps.extend([
{'host':host, 'port':i} for i in range(base_port, base_port + concurrent_num)
])
'''
thread_num = os.cpu_count()
if thread_num > len(eps):
thread_num = len(eps)
shared_model = ActorNCritic(8, 9, num_hidden)
shared_model.share_memory()
threads = [mp.Process(target=worker_thread,args=(i, eps[i::thread_num],shared_model)) for i in range(thread_num)]
for t in threads:
t.start()
for t in threads:
t.join()
def worker_thread(rank, eps, shared_model):
loop=asyncio.get_event_loop()
actors = [
DotaServiceEnv(rank=rank,config=config,
**ep) for ep in eps
]
print('thread start {} actors end points {}'.format(len(actors), eps))
for a in actors:
a.set_model(shared_model)
np.random.seed()
loop.run_until_complete(working_loop(actors))
async def working_loop(actors):
while True:
actor_output = await asyncio.gather(*[a.run_a_game() for a in actors])
if __name__ == '__main__':
main()
|
helpers_3.py
|
"""
helpers and wrappers for common rpyc tasks
"""
import threading
from rpyc.lib.lib import WeakValueDictionary, callable
from rpyc.core.consts import HANDLE_BUFFITER, HANDLE_CALL
from rpyc.core.netref import BaseNetref, syncreq, asyncreq
def buffiter(obj, chunk=10, max_chunk=1000, factor=2):
"""buffering iterator - reads the remote iterator in chunks starting with
`chunk` up to `max_chunk`, multiplying by `factor` as an exponential
backoff"""
if factor < 1:
raise ValueError("factor must be >= 1, got %r" % (factor,))
it = iter(obj)
count = chunk
while True:
items = syncreq(it, HANDLE_BUFFITER, count)
count = min(count * factor, max_chunk)
if not items:
break
for elem in items:
yield elem
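# Hedged usage sketch (illustration only; `remote_conn` and `process` are
# placeholders, not part of this module): with the defaults above the remote
# iterator is drained in growing batches of 10, 20, 40, ... items per round
# trip, capped at 1000.
#
#     for item in buffiter(remote_conn.root.some_long_sequence()):
#         process(item)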
class _Async(object):
"""creates an async proxy wrapper over an existing proxy. async proxies
are cached. invoking an async proxy will return an AsyncResult instead of
blocking"""
__slots__ = ("proxy", "__weakref__")
def __init__(self, proxy):
self.proxy = proxy
def __call__(self, *args, **kwargs):
return asyncreq(self.proxy, HANDLE_CALL, args, tuple(kwargs.items()))
def __repr__(self):
return "async(%r)" % (self.proxy,)
_async_proxies_cache = WeakValueDictionary()
def async(proxy):
pid = id(proxy)
if pid in _async_proxies_cache:
return _async_proxies_cache[pid]
if not hasattr(proxy, "____conn__") or not hasattr(proxy, "____oid__"):
raise TypeError("'proxy' must be a Netref: %r", (proxy,))
if not callable(proxy):
raise TypeError("'proxy' must be callable: %r" % (proxy,))
caller = _Async(proxy)
_async_proxies_cache[id(caller)] = _async_proxies_cache[pid] = caller
return caller
async.__doc__ = _Async.__doc__
class timed(object):
"""creates a timed asynchronous proxy. invoking the timed proxy will
run in the background and will raise an AsyncResultTimeout exception
if the computation does not terminate within the given timeout"""
__slots__ = ("__weakref__", "proxy", "timeout")
def __init__(self, proxy, timeout):
self.proxy = async(proxy)
self.timeout = timeout
def __call__(self, *args, **kwargs):
res = self.proxy(*args, **kwargs)
res.set_expiry(self.timeout)
return res
def __repr__(self):
return "timed(%r, %r)" % (self.proxy.proxy, self.timeout)
class BgServingThread(object):
"""runs an RPyC server in the background to serve all requests and replies
that arrive on the given RPyC connection. the thread is created along with
the object; you can use the stop() method to stop the server thread"""
INTERVAL = 0.1
def __init__(self, conn):
self._conn = conn
self._thread = threading.Thread(target=self._bg_server)
self._thread.setDaemon(True)
self._active = True
self._thread.start()
def __del__(self):
if self._active:
self.stop()
def _bg_server(self):
try:
while self._active:
self._conn.serve(self.INTERVAL)
except Exception:
if self._active:
raise
def stop(self):
"""stop the server thread. once stopped, it cannot be resumed. you will
have to create a new BgServingThread object later."""
self._active = False
self._thread.join()
self._conn = None
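# Hedged usage sketch (illustration only; host and port are placeholders):
# keep a background thread serving an rpyc connection while the main thread
# keeps using it, then shut the server thread down explicitly.
#
#     conn = rpyc.connect("localhost", 18861)
#     bg = BgServingThread(conn)
#     ...   # remote callbacks/notifications are now served automatically
#     bg.stop()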
|
__main__.py
|
# ESPER Waveform and Variable viewer
from __future__ import print_function
from builtins import str as text
import threading
import os
import sys
import argparse
import cmd
import time
import getpass
import platform
import configparser
import zmq
import socket
import struct
import signal
import re
import queue
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import zlib
import msgpack
import pickle  # needed by send_zipped_pickle below
from .version import __version__
if(platform.system() == u'Windows'):
import ctypes
import pyreadline as readline
else:
import readline
here = os.path.abspath(os.path.dirname(__file__))
version = __version__
MTU_SIZE = 1500
def getAuth(args, config):
username = False
password = False
if(args.user):
username = args.user
if(not args.password):
password = getpass.getpass("Password for " + username + ": ")
else:
password = args.password
else:
if(config.has_section('auth')):
if(config.has_option('auth','username')):
username = config.get('auth','username')
if(username):
if(config.has_option('auth','password')):
password = config.get('auth','password')
else:
password = getpass.getpass("Password for " + username + ": ")
else:
# Config file has no value yet, might as well use the one passed in (or the default)
config.add_section('auth')
config.set('auth', 'username', username)
return { 'username' : username, 'password' : password }
def send_zipped_pickle(socket, obj, flags=0, protocol=-1):
"""pickle an object, and zip the pickle before sending it"""
p = pickle.dumps(obj, protocol)
z = zlib.compress(p)
return socket.send(z, flags=flags)
def send_msgpack(socket, obj, flags=0, protocol=-1):
"""pickle an object, and zip the pickle before sending it"""
p = msgpack.packb(obj, use_bin_type=True)
return socket.send(p, flags=flags)
def getData(sock, q):
sample_count = 0
trigger_start = 0
trigger_delay = 0
waveform = [0] * (76*4*511) #np.zeros((76*4*511), np.dtype('i2'))
waveform_len = 0
last_msg_count = False
last_chunk_num = 0
in_msg = False
while(True):
try:
data, addr = sock.recvfrom(MTU_SIZE)
if(data):
# Discard if we aren't getting the first packet
msg_count = struct.unpack_from("<I",data,0)[0] # Overall message count
chunk_num = struct.unpack_from("<H",data,4)[0] # Chunk number, should start at zero.
sample_cnt = struct.unpack_from("<H",data,6)[0] # Samples captured
sample_byte_cnt = struct.unpack_from("<H",data,8)[0] # waveform bytes sent in this packet
if(chunk_num == 0):
in_msg = True
waveform_len = 0
trigger_start = struct.unpack_from("<Q",data,10)[0]
trigger_delay = struct.unpack_from("<Q",data,18)[0] - trigger_start
if(in_msg):
if(last_msg_count == False):
last_msg_count = msg_count
if(chunk_num != 0):
last_chunk_num = chunk_num - 1
else:
last_chunk_num = chunk_num
if(last_msg_count == msg_count):
if(chunk_num != 0):
if(last_chunk_num != (chunk_num - 1)):
print("Missed " + str(chunk_num - 1 - last_chunk_num) + " Chunk" + " " + str(last_chunk_num) + " " + str(chunk_num))
in_msg = False # Bail out of being in a message until the next chunk_num == 0 comes around
else:
if(last_msg_count != msg_count - 1):
print("Missed " + str(msg_count - 1 - last_msg_count) + " Messages")
# Still a valid message? Let's put together the waveform data
if(in_msg):
if(chunk_num == 0):
offset = 26
else:
offset = 10
for n in range(0, sample_byte_cnt, 2):
waveform[waveform_len] = struct.unpack_from("<h",data,int(offset+n))[0]
waveform_len = waveform_len + 1
if(chunk_num == 255):
# lets add it to the queue!
if((in_msg == True) and (waveform_len == 155344)):
print("Message " + str(last_msg_count) + " Received")
q.put({
u'msg_count': msg_count,
u'chunk_num': chunk_num,
u'waveform_len': waveform_len,
u'waveform': waveform })
else:
print("Message Error has occurred. Possible length mismatch? Waveform length: " + str(msg.waveform_len))
last_msg_count = msg_count
last_chunk_num = chunk_num
except BlockingIOError:
pass
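# Illustrative sketch (never called anywhere): build a chunk-0 packet with the
# same byte layout getData() parses above. All field values are arbitrary
# examples chosen for the illustration.
def _example_chunk0_packet(msg_count=1, samples=(1, -2, 3)):
    trigger_start = 123456
    trigger_time = trigger_start + 10
    # bytes 0..9: message count, chunk number, sample count, waveform byte count
    header = struct.pack("<IHHH", msg_count, 0, len(samples), len(samples) * 2)
    # bytes 10..25: trigger start and trigger time (the delay is derived by subtraction)
    trigger = struct.pack("<QQ", trigger_start, trigger_time)
    # bytes 26..: little-endian signed 16-bit samples
    payload = b"".join(struct.pack("<h", s) for s in samples)
    return header + trigger + payload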
def putData(pub, q):
# whenever the queue has data, we will pass it along to 0MQ
# in the future this will be a separate thread to de-sync the UDP and the zeromq
while True:
while not q.empty():
pub.send(msgpack.packb(q.get(), use_bin_type=False))
time.sleep(1)
def main():
try:
prog='esper-capture'
parser = argparse.ArgumentParser(prog=prog)
# Verbose, because sometimes you want feedback
parser.add_argument('-v','--verbose', help="Verbose output", default=False, action='store_true')
parser.add_argument('--version', action='version', version='%(prog)s ' + version)
parser.add_argument("-f", "--config", default="test.ini", help="Config file for node")
parser.add_argument("-s", "--storage", default="", help="Storage path for collected data")
parser.add_argument("-u", "--user", default=False, help="User for Auth")
parser.add_argument("-p", "--password", default=False, help="Password for Auth")
parser.add_argument("ip", help="IP address of node to pull data from")
parser.add_argument("pub", help="IP address of ZeroMQ Publisher")
#parser.add_argument("port", type=int, default=50005, help="Port of node to pull data from")
# Put the arguments passed into args
args = parser.parse_args()
try:
# Load up config
# Create config instance
config = configparser.ConfigParser()
# Load configuration file
config.read(args.config)
auth = getAuth(args, config)
# if a username has been defined, then a password *MUST* have been grabbed, perform authentication
if(auth['username']):
print(auth['username'] + ' ' + auth['password'])
addr = re.split(':', args.ip)
if(len(addr) < 2):
addr.append( 50005 )
# Attempt to gather data from UDP source
udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_sock.bind((addr[0], int(addr[1])))
# Setup 0MQ publisher
addr = re.split(':', args.pub)
if(len(addr) < 2):
addr.append( 50006 )
context = zmq.Context()
pub = context.socket(zmq.PUB)
pub.bind("tcp://"+str(addr[0])+":"+str(addr[1]))
# Create Queue
q = queue.Queue()
thread_udp = threading.Thread(target=getData, args=(udp_sock, q,))
thread_udp.setDaemon(True)
thread_udp.start()
thread_pub = threading.Thread(target=putData, args=(pub, q,))
thread_pub.setDaemon(True)
thread_pub.start()
while(1):
#print("Current Queue Size: " + str(q.qsize()))
if(q.full()):
print("Queue is full!")
time.sleep(1)
# No options selected, this should never be reached
sys.exit(0)
except Exception as err:
print(err)
sys.exit(1)
except KeyboardInterrupt:
print("\nExiting " + prog)
udp_sock.close()
sys.exit(0)
if __name__ == "__main__":
main()
|
httptiming.py
|
import time
import requests
from performance.driver.core.classes import Observer
from performance.driver.core.events import Event, TeardownEvent, StartEvent, ParameterUpdateEvent
from performance.driver.core.reflection import subscribesToHint, publishesHint
from threading import Thread
class HTTPTimingResultEvent(Event):
"""
The results of a timing event, initiated by a ``HTTPTimingObserver``
"""
def __init__(self, url, verb, statusCode, requestTime, responseTime,
totalTime, contentLength, *args, **kwargs):
super().__init__(*args, **kwargs)
#: The URL requested
self.url = url
#: The HTTP verb used to request this resource
self.verb = verb
#: The HTTP response code
self.statusCode = statusCode
#: The time the HTTP request took to complete
self.requestTime = requestTime
#: The time the HTTP response took to complete
self.responseTime = responseTime
#: The overall time from the beginning of the request, till the end of the
#: response
self.totalTime = totalTime
#: The length of the response body
self.contentLength = contentLength
class HTTPTimingObserver(Observer):
"""
The *HTTP Timing Observer* is performing HTTP requests to the given endpoint
and is measuring the request and response times.
::
observers:
- class: observer.HTTPTimingObserver
# The URL to send the requests at
url: http://127.0.0.1:8080/v2/apps
# [Optional] The interval of the requests (seconds)
interval: 1
# [Optional] The body of the HTTP request
body: |
{
"cmd": "sleep 1200",
"cpus": 0.1,
"mem": 64,
"disk": 0,
"instances": {{instances}},
"id": "/scale-instances/{{uuid()}}",
"backoffFactor": 1.0,
"backoffSeconds": 0
}
# [Optional] The HTTP Verb to use (Defaults to 'GET')
verb: POST
# [Optional] The HTTP headers to send
headers:
Accept: text/plain
This observer is publishing a ``HTTPTimingResultEvent`` every time a sample
is taken. Refer to the event documentation for more details.
"""
@subscribesToHint(TeardownEvent, StartEvent, ParameterUpdateEvent)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.url = self.getConfig('url')
self.interval = float(self.getConfig('interval', '1'))
self.clockThread = None
self.active = False
self.traceids = None
# Register to the Start / Teardown events
self.eventbus.subscribe(self.handleTeardownEvent, events=(TeardownEvent, ))
self.eventbus.subscribe(self.handleStartEvent, events=(StartEvent, ))
self.eventbus.subscribe(
self.handleParameterUpdate, events=(ParameterUpdateEvent, ))
def handleParameterUpdate(self, event):
"""
Update trace IDs
"""
self.traceids = event.traceids
def handleStartEvent(self, event):
"""
Start polling timer
"""
self.logger.debug('Starting polling timer')
self.active = True
self.clockThread = Thread(target=self.pollThreadHandler, name="http-timing")
self.clockThread.start()
def handleTeardownEvent(self, event):
"""
Interrupt polling timer
"""
self.logger.debug('Stopping polling timer')
self.active = False
self.clockThread.join()
self.clockThread = None
@publishesHint(HTTPTimingResultEvent)
def pollThreadHandler(self):
"""
A thread that keeps polling the given url until it responds
"""
# Render config and definitions
config = self.getRenderedConfig()
definitions = self.getDefinitions()
# If we are missing an `Authorization` header but we have a
# `dcos_auth_token` definition, allocate an `Authorization` header now
if not 'headers' in config:
config['headers'] = {}
if not 'Authorization' in config['headers'] \
and 'dcos_auth_token' in definitions:
config['headers']['Authorization'] = 'token={}'.format(
definitions['dcos_auth_token'])
# Extract useful info
url = config['url']
body = config.get('body', None)
headers = config['headers']
verb = config.get('verb', 'get')
# While running, start
while self.active:
self.logger.debug('Checking the endpoint')
try:
# Reset timer values
times = [0, 0, 0]
# Acknowledge response
def ack_response(request, *args, **kwargs):
times[1] = time.time()
# Send request (and catch errors)
times[0] = time.time()
self.logger.debug('Performing HTTP {} to {}'.format(verb, url))
res = requests.request(
verb,
url,
verify=False,
data=body,
headers=headers,
hooks=dict(response=ack_response))
times[2] = time.time()
# Log error status codes
self.logger.debug('Completed with HTTP {}'.format(res.status_code))
if res.status_code != 200:
self.logger.warning('Endpoint at {} responded with HTTP {}'.format(
url, res.status_code))
# Broadcast status
self.logger.debug(
'Measurement completed: request={}, response={}, total={}'.format(
times[1] - times[0], times[2] - times[1], times[2] - times[0]))
self.eventbus.publish(
HTTPTimingResultEvent(
url,
verb,
res.status_code,
times[1] - times[0],
times[2] - times[1],
times[2] - times[0],
len(res.text),
traceid=self.traceids))
except requests.exceptions.ConnectionError as e:
self.logger.error('Unable to connect to {}'.format(url))
except Exception as e:
self.logger.error(
'An unhandled exception occurred: {}'.format(e))
# Wait for next tick
time.sleep(self.interval)
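# Minimal standalone sketch of the timing technique used by the observer above
# (illustration only, not referenced elsewhere): a `response` hook records when
# the response headers arrive, so request time and body-read time can be
# reported separately.
def _example_time_request(url, verb='get'):
    marks = [0.0, 0.0, 0.0]
    def on_response(response, *args, **kwargs):
        marks[1] = time.time()
    marks[0] = time.time()
    res = requests.request(verb, url, hooks=dict(response=on_response))
    marks[2] = time.time()
    return {'request': marks[1] - marks[0],
            'response': marks[2] - marks[1],
            'total': marks[2] - marks[0],
            'status': res.status_code}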
|
spark.py
|
from __future__ import print_function
import copy
import threading
import time
import timeit
from hyperopt import base, fmin, Trials
from hyperopt.base import validate_timeout, validate_loss_threshold
from hyperopt.utils import coarse_utcnow, _get_logger, _get_random_id
try:
from pyspark.sql import SparkSession
_have_spark = True
except ImportError as e:
_have_spark = False
logger = _get_logger("hyperopt-spark")
class SparkTrials(Trials):
"""
Implementation of hyperopt.Trials supporting
distributed execution using Apache Spark clusters.
This requires fmin to be run on a Spark cluster.
Plugging SparkTrials into hyperopt.fmin() allows hyperopt
to send model training and evaluation tasks to Spark workers,
parallelizing hyperparameter search.
Each trial (set of hyperparameter values) is handled within
a single Spark task; i.e., each model will be fit and evaluated
on a single worker machine. Trials are run asynchronously.
See hyperopt.Trials docs for general information about Trials.
The fields we store in our trial docs match the base Trials class. The fields include:
- 'tid': trial ID
- 'state': JOB_STATE_DONE, JOB_STATE_ERROR, etc.
- 'result': evaluation result for completed trial run
- 'refresh_time': timestamp for last status update
- 'misc': includes:
- 'error': (error type, error message)
- 'book_time': timestamp for trial run start
"""
asynchronous = True
# Hard cap on the number of concurrent hyperopt tasks (Spark jobs) to run. Set at 128.
MAX_CONCURRENT_JOBS_ALLOWED = 128
def __init__(
self, parallelism=None, timeout=None, loss_threshold=None, spark_session=None
):
"""
:param parallelism: Maximum number of parallel trials to run,
i.e., maximum number of concurrent Spark tasks.
The actual parallelism is subject to available Spark task slots at
runtime.
If set to None (default) or a non-positive value, this will be set to
Spark's default parallelism, or the current total of Spark task slots,
or `1`, whichever is greater.
We cap the value at `MAX_CONCURRENT_JOBS_ALLOWED=128`.
:param timeout: Maximum time (in seconds) which fmin is allowed to take.
If this timeout is hit, then fmin will cancel running and proposed trials.
It will retain all completed trial runs and return the best result found
so far.
:param spark_session: A SparkSession object. If None is passed, SparkTrials will attempt
to use an existing SparkSession or create a new one. SparkSession is
the entry point for various facilities provided by Spark. For more
information, visit the documentation for PySpark.
"""
super(SparkTrials, self).__init__(exp_key=None, refresh=False)
if not _have_spark:
raise Exception(
"SparkTrials cannot import pyspark classes. Make sure that PySpark "
"is available in your environment. E.g., try running 'import pyspark'"
)
validate_timeout(timeout)
validate_loss_threshold(loss_threshold)
self._spark = (
SparkSession.builder.getOrCreate()
if spark_session is None
else spark_session
)
self._spark_context = self._spark.sparkContext
# The feature to support controlling jobGroupIds is in SPARK-22340
self._spark_supports_job_cancelling = hasattr(
self._spark_context.parallelize([1]), "collectWithJobGroup"
)
# maxNumConcurrentTasks() is a package private API
max_num_concurrent_tasks = self._spark_context._jsc.sc().maxNumConcurrentTasks()
spark_default_parallelism = self._spark_context.defaultParallelism
self.parallelism = self._decide_parallelism(
requested_parallelism=parallelism,
spark_default_parallelism=spark_default_parallelism,
max_num_concurrent_tasks=max_num_concurrent_tasks,
)
if not self._spark_supports_job_cancelling and timeout is not None:
logger.warning(
"SparkTrials was constructed with a timeout specified, but this Apache "
"Spark version does not support job group-based cancellation. The "
"timeout will be respected when starting new Spark jobs, but "
"SparkTrials will not be able to cancel running Spark jobs which exceed"
" the timeout."
)
self.timeout = timeout
self.loss_threshold = loss_threshold
self._fmin_cancelled = False
self._fmin_cancelled_reason = None
self.refresh()
@staticmethod
def _decide_parallelism(
requested_parallelism, spark_default_parallelism, max_num_concurrent_tasks
):
"""
Given the requested parallelism, return the max parallelism SparkTrials will actually use.
See the docstring for `parallelism` in the constructor for expected behavior.
"""
if max_num_concurrent_tasks == 0:
logger.warning(
"The cluster has no executors currently. "
"The trials won't start until some new executors register."
)
if requested_parallelism is None or requested_parallelism <= 0:
parallelism = max(spark_default_parallelism, max_num_concurrent_tasks, 1)
logger.warning(
"Because the requested parallelism was None or a non-positive value, "
"parallelism will be set to ({d}), which is Spark's default parallelism ({s}), "
"or the current total of Spark task slots ({t}), or 1, whichever is greater. "
"We recommend setting parallelism explicitly to a positive value because "
"the total of Spark task slots is subject to cluster sizing.".format(
d=parallelism,
s=spark_default_parallelism,
t=max_num_concurrent_tasks,
)
)
else:
parallelism = requested_parallelism
if parallelism > SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED:
logger.warning(
"Parallelism ({p}) is capped at SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED ({c}).".format(
p=parallelism, c=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
)
)
parallelism = SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
if parallelism > max_num_concurrent_tasks:
logger.warning(
"Parallelism ({p}) is greater than the current total of Spark task slots ({c}). "
"If dynamic allocation is enabled, you might see more executors allocated.".format(
p=requested_parallelism, c=max_num_concurrent_tasks
)
)
return parallelism
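# Worked example of the rule above (illustration only): with
# requested_parallelism=None, spark_default_parallelism=8 and
# max_num_concurrent_tasks=16 the result is max(8, 16, 1) == 16; requesting
# 200 would be capped to MAX_CONCURRENT_JOBS_ALLOWED (128), and a warning is
# logged whenever the chosen value still exceeds the currently available
# task slots.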
def count_successful_trials(self):
"""
Returns the current number of trials which ran successfully
"""
return self.count_by_state_unsynced(base.JOB_STATE_DONE)
def count_failed_trials(self):
"""
Returns the current number of trial runs which failed
"""
return self.count_by_state_unsynced(base.JOB_STATE_ERROR)
def count_cancelled_trials(self):
"""
Returns the current number of cancelled trial runs.
This covers trials which are cancelled from exceeding the timeout.
"""
return self.count_by_state_unsynced(base.JOB_STATE_CANCEL)
def count_total_trials(self):
"""
Returns the current number of all successful, failed, and cancelled trial runs
"""
total_states = [
base.JOB_STATE_DONE,
base.JOB_STATE_ERROR,
base.JOB_STATE_CANCEL,
]
return self.count_by_state_unsynced(total_states)
def delete_all(self):
"""
Reset the Trials to init state
"""
super(SparkTrials, self).delete_all()
self._fmin_cancelled = False
self._fmin_cancelled_reason = None
def trial_attachments(self, trial):
raise NotImplementedError("SparkTrials does not support trial attachments.")
def fmin(
self,
fn,
space,
algo,
max_evals,
timeout,
loss_threshold,
max_queue_len,
rstate,
verbose,
pass_expr_memo_ctrl,
catch_eval_exceptions,
return_argmin,
show_progressbar,
early_stop_fn,
):
"""
This should not be called directly but is called via :func:`hyperopt.fmin`
Refer to :func:`hyperopt.fmin` for docs on each argument
"""
if timeout is not None:
if self.timeout is not None:
logger.warning(
"Timeout param was defined in Trials object, ignoring fmin definition"
)
else:
validate_timeout(timeout)
self.timeout = timeout
if loss_threshold is not None:
validate_loss_threshold(loss_threshold)
self.loss_threshold = loss_threshold
assert (
not pass_expr_memo_ctrl
), "SparkTrials does not support `pass_expr_memo_ctrl`"
assert (
not catch_eval_exceptions
), "SparkTrials does not support `catch_eval_exceptions`"
state = _SparkFMinState(self._spark, fn, space, self)
# Will launch a dispatcher thread which runs each trial task as one spark job.
state.launch_dispatcher()
try:
res = fmin(
fn,
space,
algo,
max_evals,
timeout=timeout,
loss_threshold=loss_threshold,
max_queue_len=max_queue_len,
trials=self,
allow_trials_fmin=False, # -- prevent recursion
rstate=rstate,
pass_expr_memo_ctrl=None,  # not supported
catch_eval_exceptions=catch_eval_exceptions,
verbose=verbose,
return_argmin=return_argmin,
points_to_evaluate=None,  # not supported
show_progressbar=show_progressbar,
early_stop_fn=early_stop_fn,
)
except BaseException as e:
logger.debug("fmin thread exits with an exception raised.")
raise e
else:
logger.debug("fmin thread exits normally.")
return res
finally:
state.wait_for_all_threads()
logger.info(
"Total Trials: {t}: {s} succeeded, {f} failed, {c} cancelled.".format(
t=self.count_total_trials(),
s=self.count_successful_trials(),
f=self.count_failed_trials(),
c=self.count_cancelled_trials(),
)
)
class _SparkFMinState:
"""
Class for managing threads which run concurrent Spark jobs.
This maintains a primary dispatcher thread, plus 1 thread per Hyperopt trial.
Each trial's thread runs 1 Spark job with 1 task.
"""
def __init__(self, spark, eval_function, space, trials):
self.spark = spark
self.eval_function = eval_function
self.space = space
self.trials = trials
self._fmin_done = False
self._dispatcher_thread = None
self._task_threads = set()
if self.trials._spark_supports_job_cancelling:
spark_context = spark.sparkContext
self._job_group_id = spark_context.getLocalProperty("spark.jobGroup.id")
self._job_desc = spark_context.getLocalProperty("spark.job.description")
interrupt_on_cancel = spark_context.getLocalProperty(
"spark.job.interruptOnCancel"
)
if interrupt_on_cancel is None:
self._job_interrupt_on_cancel = False
else:
self._job_interrupt_on_cancel = "true" == interrupt_on_cancel.lower()
# In certain Spark deployments, the local property "spark.jobGroup.id"
# value is None, so we create one to use for SparkTrials.
if self._job_group_id is None:
self._job_group_id = "Hyperopt_SparkTrials_" + _get_random_id()
if self._job_desc is None:
self._job_desc = "Trial evaluation jobs launched by hyperopt fmin"
logger.debug(
"Job group id: {g}, job desc: {d}, job interrupt on cancel: {i}".format(
g=self._job_group_id,
d=self._job_desc,
i=self._job_interrupt_on_cancel,
)
)
def running_trial_count(self):
return self.trials.count_by_state_unsynced(base.JOB_STATE_RUNNING)
@staticmethod
def _begin_trial_run(trial):
trial["state"] = base.JOB_STATE_RUNNING
now = coarse_utcnow()
trial["book_time"] = now
trial["refresh_time"] = now
logger.debug("trial task {tid} started".format(tid=trial["tid"]))
def _finish_trial_run(self, is_success, is_cancelled, trial, data):
"""
Call this method when a trial evaluation finishes. It will save results to the
trial object and update task counters.
:param is_success: whether the trial succeeded
:param is_cancelled: whether the trial was cancelled
:param data: If the trial succeeded, this is the return value from the trial
task function. Otherwise, this is the exception raised when running the trial
task.
"""
if is_cancelled:
logger.debug(
"trial task {tid} cancelled, exception is {e}".format(
tid=trial["tid"], e=str(data)
)
)
self._write_cancellation_back(trial, e=data)
elif is_success:
logger.debug(
"trial task {tid} succeeded, result is {r}".format(
tid=trial["tid"], r=data
)
)
self._write_result_back(trial, result=data)
else:
logger.debug(
"trial task {tid} failed, exception is {e}".format(
tid=trial["tid"], e=str(data)
)
)
self._write_exception_back(trial, e=data)
def launch_dispatcher(self):
def run_dispatcher():
start_time = timeit.default_timer()
while not self._fmin_done:
new_tasks = self._poll_new_tasks()
for trial in new_tasks:
self._run_trial_async(trial)
cur_time = timeit.default_timer()
elapsed_time = cur_time - start_time
# In the future, timeout checking logic could be moved to `fmin`.
# For now, timeouts are specific to SparkTrials.
# When a timeout happens:
# - Set `trials._fmin_cancelled` flag to be True.
# - FMinIter checks this flag and exits if it is set to True.
if (
self.trials.timeout is not None
and elapsed_time > self.trials.timeout
and not self.trials._fmin_cancelled
):
self.trials._fmin_cancelled = True
self.trials._fmin_cancelled_reason = "fmin run timeout"
self._cancel_running_trials()
logger.warning(
"fmin cancelled because of "
+ self.trials._fmin_cancelled_reason
)
time.sleep(1)
if self.trials._fmin_cancelled:
# Because fmin cancellation was triggered, warn that the dispatcher won't
# launch more trial tasks.
logger.warning("fmin is cancelled, so new trials will not be launched.")
logger.debug("dispatcher thread exits normally.")
self._dispatcher_thread = threading.Thread(target=run_dispatcher)
self._dispatcher_thread.setDaemon(True)
self._dispatcher_thread.start()
@staticmethod
def _get_spec_from_trial(trial):
return base.spec_from_misc(trial["misc"])
@staticmethod
def _write_result_back(trial, result):
trial["state"] = base.JOB_STATE_DONE
trial["result"] = result
trial["refresh_time"] = coarse_utcnow()
@staticmethod
def _write_exception_back(trial, e):
trial["state"] = base.JOB_STATE_ERROR
trial["misc"]["error"] = (str(type(e)), str(e))
trial["refresh_time"] = coarse_utcnow()
@staticmethod
def _write_cancellation_back(trial, e):
trial["state"] = base.JOB_STATE_CANCEL
trial["misc"]["error"] = (str(type(e)), str(e))
trial["refresh_time"] = coarse_utcnow()
def _run_trial_async(self, trial):
def finish_trial_run(result_or_e):
if not isinstance(result_or_e, BaseException):
self._finish_trial_run(
is_success=True,
is_cancelled=self.trials._fmin_cancelled,
trial=trial,
data=result_or_e,
)
logger.debug(
"trial {tid} task thread exits normally and writes results "
"back correctly.".format(tid=trial["tid"])
)
else:
self._finish_trial_run(
is_success=False,
is_cancelled=self.trials._fmin_cancelled,
trial=trial,
data=result_or_e,
)
logger.debug(
"trial {tid} task thread catches an exception and writes the "
"info back correctly.".format(tid=trial["tid"])
)
def run_task_thread():
local_eval_function, local_space = self.eval_function, self.space
params = self._get_spec_from_trial(trial)
def run_task_on_executor(_):
domain = base.Domain(
local_eval_function, local_space, pass_expr_memo_ctrl=None
)
try:
result = domain.evaluate(params, ctrl=None, attach_attachments=False)
yield result
except BaseException as e:
yield e
try:
worker_rdd = self.spark.sparkContext.parallelize([0], 1)
if self.trials._spark_supports_job_cancelling:
result_or_e = worker_rdd.mapPartitions(
run_task_on_executor
).collectWithJobGroup(
self._job_group_id,
self._job_desc,
self._job_interrupt_on_cancel,
)[0]
else:
result_or_e = worker_rdd.mapPartitions(run_task_on_executor).collect()[0]
except BaseException as e:
# Catch all exceptions here to make the program more robust: there are
# several possible reasons an exception can be raised at this point, which
# is why `except BaseException` is used.
#
# If the cancelled flag is set, it means all running tasks need to be
# cancelled; otherwise it means this task failed.
finish_trial_run(e)
else:
# Exceptions captured in run_task_on_executor are returned in result_or_e
finish_trial_run(result_or_e)
task_thread = threading.Thread(target=run_task_thread)
task_thread.setDaemon(True)
task_thread.start()
self._task_threads.add(task_thread)
def _poll_new_tasks(self):
new_task_list = []
for trial in copy.copy(self.trials.trials):
if trial["state"] == base.JOB_STATE_NEW:
# check parallelism limit
if self.running_trial_count() >= self.trials.parallelism:
break
new_task_list.append(trial)
self._begin_trial_run(trial)
return new_task_list
def _cancel_running_trials(self):
if self.trials._spark_supports_job_cancelling:
logger.debug(
"Cancelling all running jobs in job group {g}".format(
g=self._job_group_id
)
)
self.spark.sparkContext.cancelJobGroup(self._job_group_id)
# Make a copy of trials by slicing
for trial in self.trials.trials[:]:
if trial["state"] in [base.JOB_STATE_NEW, base.JOB_STATE_RUNNING]:
trial["state"] = base.JOB_STATE_CANCEL
else:
logger.info(
"Because the current Apache PySpark version does not support "
"cancelling jobs by job group ID, SparkTrials will block until all of "
"its running Spark jobs finish."
)
def wait_for_all_threads(self):
"""
Wait for the dispatcher and worker threads to finish.
"""
self._fmin_done = True
self._dispatcher_thread.join()
self._dispatcher_thread = None
for task_thread in self._task_threads:
task_thread.join()
self._task_threads.clear()
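# Hedged usage sketch (illustration only; the objective and search space are
# placeholders): SparkTrials defined above plugs into hyperopt.fmin like any
# other Trials object, after which each trial is evaluated as a one-task
# Spark job.
#
#     from hyperopt import fmin, tpe, hp
#
#     def objective(x):
#         return (x - 3) ** 2
#
#     spark_trials = SparkTrials(parallelism=4, timeout=600)
#     best = fmin(fn=objective, space=hp.uniform("x", -10, 10),
#                 algo=tpe.suggest, max_evals=50, trials=spark_trials)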
|
vul_recheck_v2.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:owefsad
# software: PyCharm
# project: lingzhi-webapi
import logging
import time
from dongtai.models.agent import IastAgent
from dongtai.models.project import IastProject
from dongtai.models.replay_queue import IastReplayQueue
from dongtai.models.vulnerablity import IastVulnerabilityModel
from dongtai.utils.validate import Validate
from iast.utils import extend_schema_with_envcheck, get_response_serializer
from rest_framework import serializers
from dongtai.endpoint import R
from dongtai.utils import const
from dongtai.endpoint import UserEndPoint
from django.utils.translation import gettext_lazy as _
from django.db.models import F
from django.db.models import Q
import threading
logger = logging.getLogger('dongtai-webapi')
_ResponseGetSerializer = get_response_serializer(
status_msg_keypair=(
((201, _('Handle success')), ''),
((202, _('Item ID should not be empty')), ''),
((202, _('Incorrect format parameter')), ''),
((202, _('Batch playback error')), ''),
((202, _('Current application has not been associated with probes and cannot be reproduced.')), ''),
((202, _('No permission to access')), ''),
))
_ResponsePostSerializer = get_response_serializer(
status_msg_keypair=(
((201, _('Handle success')), ''),
((202, _('IDS should not be empty')), ''),
((202, _('IDS must be: Vulnerability ID, Vulnerability ID Format')), ''),
((202, _('Vulnerability replay error')), ''),
))
class VulReCheckv2(UserEndPoint):
@staticmethod
def recheck(vul_queryset):
timestamp = int(time.time())
waiting_count = 0
success_count = 0
re_success_count = 0
opt_vul_queryset = vul_queryset.only('agent__id', 'id')
vul_ids = [i.id for i in opt_vul_queryset]
vul_id_agentmap = {i.id:i.agent_id for i in opt_vul_queryset}
history_replay_vul_ids = IastReplayQueue.objects.filter(
relation_id__in=vul_ids,
replay_type=const.VUL_REPLAY).order_by('relation_id').values_list(
'relation_id', flat=True).distinct()
waiting_count = IastReplayQueue.objects.filter(
Q(relation_id__in=vul_ids)
& Q(replay_type=const.VUL_REPLAY)
& Q(state__in=(const.PENDING, const.WAITING))).count()
re_success_count = IastReplayQueue.objects.filter(
Q(relation_id__in=[i.id for i in opt_vul_queryset])
& Q(replay_type=const.VUL_REPLAY)
& ~Q(state__in=(const.PENDING, const.WAITING))).update(
state=const.WAITING,
count=F('count') + 1,
update_time=timestamp)
vuls_not_exist = set(vul_ids) - set(history_replay_vul_ids)
success_count = len(vuls_not_exist)
IastReplayQueue.objects.bulk_create(
[
IastReplayQueue(agent_id=vul_id_agentmap[vul_id],
relation_id=vul_id,
state=const.WAITING,
count=1,
create_time=timestamp,
update_time=timestamp,
replay_type=const.VUL_REPLAY)
for vul_id in vuls_not_exist
],
ignore_conflicts=True)
vul_queryset.update(status_id=1, latest_time=timestamp)
return waiting_count, success_count, re_success_count
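# Worked example of the three counters above (illustration only): for vul_ids
# {1, 2, 3} where id 1 already has a PENDING/WAITING replay entry, id 2 has a
# finished entry and id 3 has never been replayed, recheck() returns
# waiting_count=1 (id 1 is left untouched), re_success_count=1 (id 2 is reset
# to WAITING with its count incremented) and success_count=1 (a new queue row
# is bulk-created for id 3).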
@staticmethod
def vul_check_for_queryset(vul_queryset):
active_agent_ids = IastAgent.objects.filter(
id__in=vul_queryset.values('agent_id'),
online=const.RUNNING,
is_core_running=const.CORE_IS_RUNNING).values(
"id").distinct().all()
no_agent = vul_queryset.filter(~Q(
agent_id__in=active_agent_ids)).count()
waiting_count, success_count, re_success_count = VulReCheckv2.recheck(
vul_queryset)
return no_agent, waiting_count, success_count, re_success_count
@extend_schema_with_envcheck(
[{
'name':
'type',
'type':
str,
'description':
_('''available options are ("all","project").
Corresponding to all or specific project respectively.''')
}, {
'name':
"projectId",
'type':
int,
'description':
_("""The corresponding id of the Project.
Only If the type is project, the projectId here will be used.""")
}],
tags=[_('Vulnerability')],
summary=_("Vulnerability verification"),
description=_("""Verify the user's corresponding vulnerabilities.
Need to specify the type"""),
response_schema=_ResponsePostSerializer
)
def post(self, request):
"""
:param request:
:return:
"""
try:
vul_ids = request.data.get('ids')
if vul_ids is None or vul_ids == '':
return R.failure(_("IDS should not be empty"))
vul_ids = vul_ids.split(',')
if Validate.is_number(vul_ids) is False:
return R.failure(_('IDS must be: Vulnerability ID, Vulnerability ID Format'))
auth_agents = self.get_auth_agents_with_user(user=request.user)
vul_queryset = IastVulnerabilityModel.objects.filter(
id__in=vul_ids, agent__in=auth_agents)
no_agent, waiting_count, success_count, re_success_count = self.vul_check_for_queryset(
vul_queryset)
return R.success(data={
"no_agent": no_agent,
"pending": waiting_count,
"recheck": re_success_count,
"checking": success_count
},
msg=_('Handle success'))
except Exception as e:
logger.error(f' msg:{e}')
return R.failure(msg=_('Vulnerability replay error'))
def vul_check_for_project(self, project_id, auth_users):
try:
project_exist = IastProject.objects.values("id").filter(
id=project_id, user__in=auth_users).exists()
if project_exist:
agent_queryset = IastAgent.objects.values("id").filter(
bind_project_id=project_id)
if agent_queryset:
agent_ids = agent_queryset.values_list('id',flat=True)
vul_queryset = IastVulnerabilityModel.objects.filter(
agent_id__in=agent_ids)
waiting_count, success_count, re_success_count = self.recheck(
vul_queryset)
return True, waiting_count, re_success_count, success_count, None
else:
return False, 0, 0, 0, _(
'Current application has not been associated with probes and cannot be reproduced.'
)
else:
return False, 0, 0, 0, _('No permission to access')
except Exception as e:
logger.error(f' msg:{e}',exc_info=True)
return False, 0, 0, 0, _('Batch playback error')
@extend_schema_with_envcheck(
[{
'name':
'type',
'type':
str,
'description':
_('''available options are ("all","project").
Corresponding to all or specific project respectively.''')
}, {
'name':
"projectId",
'type':
int,
'description':
_("""The corresponding id of the Project.
Only If the type is project, the projectId here will be used.""")
}],
tags=[_('Vulnerability')],
summary=_("Vulnerability verification"),
description=_("""Verify the user's corresponding vulnerabilities.
Need to specify the type"""),
response_schema=_ResponsePostSerializer
)
def get(self, request):
try:
check_type = request.query_params.get('type')
project_id = request.query_params.get('projectId')
if check_type == 'project' and not project_id:
return R.failure(msg=_("Item ID should not be empty"))
if check_type == 'all':
vul_queryset = IastVulnerabilityModel.objects.filter(
agent__in=self.get_auth_agents_with_user(request.user))
def vul_check_thread():
self.vul_check_for_queryset(vul_queryset)
t1 = threading.Thread(target=vul_check_thread, daemon=True)
t1.start()
return R.success(msg=_('Verification in progress'))
elif check_type == 'project':
auth_users = self.get_auth_users(request.user)
def vul_check_thread():
self.vul_check_for_project(project_id,
auth_users=auth_users)
t1 = threading.Thread(target=vul_check_thread, daemon=True)
t1.start()
return R.success(msg=_("Verification in progress"))
return R.failure(msg=_("Incorrect format parameter"))
except Exception as e:
logger.error(f'user_id:{request.user.id} msg:{e}', exc_info=True)
return R.failure(msg=_('Batch playback error'))
|
test_datamol_import_time.py
|
import multiprocessing
import platform
DATAMOL_MAX_IMPORT_DURATION = {} # in seconds
DATAMOL_MAX_IMPORT_DURATION["default"] = 2
DATAMOL_MAX_IMPORT_DURATION["linux"] = 2
DATAMOL_MAX_IMPORT_DURATION["osx"] = 5
DATAMOL_MAX_IMPORT_DURATION["windows"] = 4
def _get_max_import_duration():
if platform.system() == "Windows":
return DATAMOL_MAX_IMPORT_DURATION["windows"]
elif platform.system() == "Linux":
return DATAMOL_MAX_IMPORT_DURATION["linux"]
elif platform.system() == "Darwin":
return DATAMOL_MAX_IMPORT_DURATION["osx"]
else:
return DATAMOL_MAX_IMPORT_DURATION["default"]
def _import_datamol_fn(queue):
import datetime
start = datetime.datetime.now()
import datamol
duration = datetime.datetime.now() - start
duration = duration.total_seconds()
queue.put(duration)
def test_datamol_import():
context = multiprocessing.get_context(method="spawn")
queue = context.Queue()
p = context.Process(target=_import_datamol_fn, args=(queue,))
p.start()
duration = queue.get()
p.join()
p.close()
print(duration)
# Check datamol wasn't too fast to load
# That should never happen and it's a simple sanity check
# whether the `spawn` process worked correctly.
assert duration > 1e-2, "datamol loaded too fast, something is wrong with the test."
# Check datamol wasn't too long to load
assert duration < _get_max_import_duration(), "datamol took too long to load."
|
test_master_slave.py
|
# coding: utf-8
"""Test the master talking to the slave"""
import logging
import os
import threading
import signal
import subprocess
import sys
import time
import traceback
import pytest
from drmaa_futures.master import ZeroMQListener
logger = logging.getLogger(__name__)
def wait_until(condition, interval=0.1, timeout=1, *args):
"""Simple convenience function to wait for a condition."""
start = time.time()
while not condition(*args) and time.time() - start < timeout:
time.sleep(interval)
@pytest.fixture
def master():
zmq = ZeroMQListener()
zmq._socket.LINGER = 300
run = True
def _do_thread():
try:
while run:
zmq.process_messages()
except Exception as e:
logger.error("Got exception in worker thread: %s", e)
logger.error(traceback.format_exc())
thread = threading.Thread(target=_do_thread)
# Allow loose test threads?
# thread.daemon = True
thread.start()
yield zmq
run = False
logger.debug("Ended test loop")
thread.join()
zmq.shutdown()
@pytest.fixture
def slave(master):
# def slave(url=None, id="0", timeout=None):
# We need to update the environment to include this file, so that we can unpickle it's functions
new_env = dict(os.environ)
new_env["PYTHONPATH"] = ":".join(
new_env.get("PYTHONPATH", "").split(":") + [os.path.dirname(__file__)])
url = [master.endpoint]
timeout = ["--timeout=3"]
proc = subprocess.Popen(
[sys.executable, "-m", "drmaa_futures", "-v", "slave"] + timeout + url,
env=new_env)
try:
time.sleep(0.2)
yield proc
finally:
try:
# Kill in a gentle way
os.kill(proc.pid, signal.SIGINT)
proc.wait()
except OSError:
# On python2 trying to kill something that has just died seems to error
pass
def test_worker_registered(master, slave):
# Since we could have lag here, explicitly wait for a bit to give time to connect
wait_until(lambda: master.active_workers, timeout=5)
assert master.active_workers
def test_task_enqueue(master, slave):
task = master.enqueue_task(lambda: 42)
assert task.result(timeout=2) == 42
task = master.enqueue_task(lambda: 42)
assert task.cancel()
task2 = master.enqueue_task(lambda: 1337)
assert task2.result(timeout=2) == 1337
|
map_editor.py
|
import traceback
import sys
import os
from functools import partial
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1' #hiding pygame greeting console output
import pygame
import json
from engine import *
import tkinter
import tkinter.filedialog
import tkinter.messagebox as mb
from tkinter import scrolledtext as stxt
from tkinter import ttk
import random
import time
import shutil
import zipfile
import threading
import copy
def show_exception_and_exit(exc_type, exc_value, tb):
text = traceback.format_exc()
show_error(text = text)
sys.excepthook = show_exception_and_exit
sys.unraisablehook = show_exception_and_exit  # configuring Python so it won't just crash when an error occurs
args = sys.argv
def show_error(title = 'Ooops!', text = 'ERROR'):
top = tkinter.Tk()
top.title(title)
top.withdraw()
mb.showerror(title, text) #function to show python tracebacks
top.destroy()
try:
a = open('map_editor/config.json', 'r')
setts = json.loads(a.read())
a.close()
a = open(setts['blk_file_path'], 'r')
blk_data = json.loads(a.read())
a.close()
a = open(setts['ent_file_path'], 'r', encoding = "UTF-8")
ent_data = json.loads(a.read())
a.close()
a = open(setts['help_file_path'], 'r', encoding = "UTF-8")
TXT_HELP = a.read()
a.close()
a = open(setts['lang_file'], 'r', encoding = "UTF-8")
lang = json.loads(a.read())
a.close()
except:
text = traceback.format_exc()
show_error(text = text)
IDMAP = {}  # the ID map contains block data keyed by its ID; this was made to optimise world data
ENTMAP = {} #same map but for entities
TEXTURES = {} #textures data
ENT_TEXTURES = {}
ENTITY = [] #entity list
WIRES = [] #wires list
for key, value in blk_data.items():
IDMAP[int(value['id'])] = value
for key, value in ent_data.items():
ENTMAP[int(value['id'])] = value
WIDTH = setts['WIDTH']
HEIGHT = setts['HEIGHT'] #screen size in pixels
W_w = setts['world_size'] #map size
W_h = W_w
ENT_COUNTER = 0
T_SIZE = setts['texture_size']  # variable that defines the texture size; T_SIZE can change while the program runs, but T_SIZE_ORIG can't
T_SIZE_ORIG = setts['texture_size']
X0, Y0 = 0,0
CAM_X = 0
CAM_Y = 0 #camera position
def merge_dicts(dictOne, dictTwo):
dictThree = dictOne.copy()
dictThree.update(dictTwo)
return dictThree
def open_file(title = 'Open'):
top = tkinter.Tk()
top.title(title) #function for beautiful GUI file opening
top.withdraw()
file_name = tkinter.filedialog.askopenfilename(parent=top)
top.destroy()
return file_name
def save_file(title = 'Save'):
    top = tkinter.Tk()
    top.title(title)
    top.withdraw() #helper that shows a GUI dialog for choosing a file to save
file_name = tkinter.filedialog.asksaveasfilename(parent=top)
top.destroy()
return file_name
def compile_map(name): #the heart of the CMF system: the compiler
print(lang['compiler']['start'].format(time.ctime()[11:19]))
try:
os.mkdir('maps_raw/' + name)
except:
        shutil.rmtree('maps_raw/' + name) #a folder with the same name already exists, so delete and recreate it
os.mkdir('maps_raw/' + name)
prefix = 'maps_raw/' + name + '/' #defining prefix
FILES = []
print(lang['compiler']['dir_copy'].format(time.ctime()[11:19]))
for key, value in TEXTURES.items():
path = key
try:
if path != False:
os.makedirs(prefix + '/'.join(path.split('/')[0:-1]))
                print('\t' + prefix + '/'.join(path.split('/')[0:-1])) #recreating the directory hierarchy
except:
pass
for x in range(W_w):
for y in range(W_h):
j = ent.get(x, y)
if type(j) != int:
if j['name'] != 'ent_noname':
for key, value in j.items():
try:
if os.path.isfile(str(value)):
os.makedirs(prefix + '/'.join(value.split('/')[0:-1]))
print('\t' + prefix + '/'.join(value.split('/')[0:-1]))
except:
pass
for key, value in j['attributes'].items():
try:
if os.path.isfile(str(value)):
                                os.makedirs(prefix + '/'.join(value.split('/')[0:-1])) #recreating the directory hierarchy
print('\t' + prefix + '/'.join(value.split('/')[0:-1]))
except:
pass
for key, value in ent_data.items():
try:
            os.makedirs(prefix + '/'.join(value['image'].split('/')[0:-1])) #recreating the directory hierarchy
except:
pass
print(lang['compiler']['file_copy'].format(time.ctime()[11:19]))
for key, value in TEXTURES.items():
path = key
try:
shutil.copy(str(path), prefix + path)
            print('\t' + path + ' > ' + prefix + path) #the code below checks all referenced paths for files and copies them
FILES.append(path)
except:
print(lang['compiler']['failed_copy'] + prefix + path)
for x in range(W_w):
for y in range(W_h):
j = ent.get(x, y)
if type(j) != int:
if j['name'] != 'ent_noname':
for key, value in j.items():
try:
shutil.copy(str(value), prefix + str(value))
print('\t' + str(value) + ' > ' + prefix + str(value))
FILES.append(prefix + str(value))
except:
pass
for key, value in j['attributes'].items():
try:
shutil.copy(str(value), prefix + str(value))
print('\t' + str(value) + ' > ' + prefix + str(value))
FILES.append(prefix + str(value))
except:
pass
for key, value in ent_data.items():
if os.path.isfile(value['image']):
FILES.append(value['image'])
shutil.copy(str(value['image']), prefix + str(value['image']))
print(lang['compiler']['blocks_copy'].format(time.ctime()[11:19]))
    shutil.copy(setts['blk_file_path'], prefix + 'blocks.json') #copying the block definitions into the map
FILES.append(prefix + 'blocks.json')
print(lang['compiler']['map_patch'].format(time.ctime()[11:19]))
a = open(prefix + 'world.json', 'w')
FILES.append(prefix + 'world.json')
PLPS = []
x, y = 0, 0
for x in range(W_w):
for y in range(W_h):
if type(ent.get(x, y)) != int:
if ent.get(x, y)['func'] != False:
                    if ent.get(x, y)['func'] == 'spawnpoint': #collecting player's spawn positions
PLPS.append([x, y])
ENTITY = []
for x in range(W_w):
for y in range(W_h):
unit = ent.get(x,y)
if type(unit) != int:
if unit['name'] != 'ent_noname': #defining all map entities
ENTITY.append(unit)
idmap_ = {}
for key, value in IDMAP.items():
idmap_[int(key)] = value
data = {
'world':world.arr,
'idmap':idmap_,
'world_size':W_w, #patching all map data to this dictionary
'entity':ENTITY,
'player_pos':PLPS,
'ent_data':ent_data,
'wire':WIRES,
'cam':[CAM_X, CAM_Y]
}
json.dump(data, a, separators=(',', ':')) #and dumping it to JSON
a.close()
arh = zipfile.ZipFile('maps/' + name + '.cmf', 'w')
for root, dirs, files in os.walk('maps_raw/'+name): #copying all defined files
for file in files:
print(lang['compiler']['added'] + ''.join(os.path.join(root, file).split(name)[1][1:]))
arh.write(prefix + ''.join(os.path.join(root, file).split(name)[1][1:]), arcname=''.join(os.path.join(root, file).split(name)[1][1:]))
arh.close()
print(lang['compiler']['done'].format(time.ctime()[11:19]))
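# Rough layout of a compiled <name>.cmf archive, as assembled above (it is a plain zip):
#   world.json  - world array, block id map, entities, wires, spawn points and camera position
#   blocks.json - copy of the block definitions file
#   plus every referenced texture/sound file, stored under its original relative path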
def load_map(out = False): #function to load CMF files
global world, IDMAP, TEXTURES, blk_data, W_w, W_h, WIRES, CAM_X, CAM_Y, ent
ent.fill(0) #clearing all entities
path = open_file() #get the path of CMF
if path != '':
name = path.split('/')[-1].split('.')[0] #defining the map name
arh = zipfile.ZipFile(path, 'r') #opening zip (yes, CMF is actually a zip)
try:
os.mkdir('maps_raw/' + name)
except:
            shutil.rmtree('maps_raw/' + name) #a folder with this name already exists, so delete and recreate it
os.mkdir('maps_raw/' + name)
prefix = 'maps_raw/' + name + '/' #defining prefix for all file paths in map
for file_info in arh.infolist():
if out:
                print(lang['loader']['exctracted']+file_info.filename) #extracting map files into a temporary folder
try:
arh.extract(file_info.filename, 'maps_raw/'+name)
except:
pass
a = open(prefix + 'world.json', 'r') #reading main map data
data = json.loads(a.read())
a.close()
try:
W_w, W_h = data['world_size'], data['world_size'] #defining world size
except:
print(lang['loader']['world_size_failed'])
W_w, W_h = 128, 128
world = arr2.arr2(W_w, W_h, 0) #recreating the world
world.arr = data['world']
ent = arr2.arr2(W_w, W_h, ent_data['ent_noname'])
for j in data['entity']:
ent.put(j['pos_x'], j['pos_y'], j) #loading entities
        a = open(prefix + 'blocks.json', 'r') #reading map block info
blk_data = json.loads(a.read())
a.close()
for key, value in blk_data.items():
if value['image'] != False:
TEXTURES[prefix + value['image']] = image(value['image']) #defining blocks textures
for key, value in blk_data.items():
if value['image'] != False:
unit = value
unit['image'] = prefix + value['image'] #defining blocks id map
IDMAP[int(value['id'])] = unit
else:
IDMAP[int(value['id'])] = value
try:
WIRES = data['wire']
except:
print(lang['loader']['wire_failed']) #loading wire data
WIRES = []
try:
CAM_X, CAM_Y = data['cam'] #loading camera position info
except:
print(lang['loader']['camera_failed'])
pygame.display.set_caption(setts['TITLE'] + ' - ' + name + '.cmf')
def load_map_by_file(path): #legacy function that loads a map from a given path
global world, IDMAP, TEXTURES, blk_data, W_w, W_h, ent
if os.path.isfile(path):
name = 'maps_raw/' + os.getcwd().join(path.split(os.getcwd())[1:])[1:].split('.')[0]
arh = zipfile.ZipFile(path, 'r')
try:
os.mkdir(name)
except:
shutil.rmtree(name)
os.mkdir(name)
prefix = 'maps_raw/' + os.getcwd().join(path.split(os.getcwd())[1:])[1:].split('.')[0] + '/'
for file_info in arh.infolist():
#print(file_info.filename)
try:
arh.extract(file_info.filename, name)#+file_info.filename.split('/')[-1])
except:
pass
a = open(prefix + 'world.json', 'r')
data = json.loads(a.read())
a.close()
W_w, W_h = data['world_size'], data['world_size']
world.arr = data['world']
ent = arr2.arr2(W_w, W_h, ent_data['ent_noname'])
for j in data['entity']:
ent.put(j['pos_x'], j['pos_y'], j)
a = open(prefix + 'blocks.json', 'r')
blk_data = json.loads(a.read())
        a.close()
for key, value in blk_data.items():
if value['image'] != False:
TEXTURES[prefix + value['image']] = image(value['image'])
for key, value in blk_data.items():
if value['image'] != False:
unit = value
unit['image'] = prefix + value['image']
IDMAP[int(value['id'])] = unit
else:
IDMAP[int(value['id'])] = value
def cam(x, y):
global CAM_X, CAM_Y
CAM_X, CAM_Y = x, y
def fg(ID):
global fg_block #some "commands" for old console
fg_block = ID
def bg(ID):
global bg_block
bg_block = ID
def console(): #old console thread function
while c_loop:
com = str(input('Python >>> '))
try:
exec(com, globals())
except Exception as ex:
text = traceback.format_exc()
show_error(text = text)
def block(x, y, ID):
try:
world.put(x, y, ID)
except:
pass
def image(path): #returns pygame surface image
try:
return pygame.image.load(path)
except:
print(lang['misc']['image_failed'].format(path))
def invert(var):
    return not var
def get_block(ID):
try:
return IDMAP[ID]
except:
print(lang['misc']['block_failed'].format(ID))
def get_image(ID):
try:
return TEXTURES[get_block(ID)['image']]
except:
return EMO
def exist(key, dic):
try:
dic[key]
return True
except:
return False
def get_ent_image(ID):
try:
return ENT_TEXTURES[get_entity(ID)['image']]
except:
return EMO
def get_entity(ID):
try:
return ENTMAP[ID]
except:
pass
def draw(): #main render function
x, y = 0, 0
for x in range(SCR_X):
for y in range(SCR_Y):
if CAM_X + x >= 0 and CAM_X + x < W_w and CAM_Y + y >= 0 and CAM_Y + y < W_h:
if world.get(CAM_X + x, CAM_Y + y) != setts['nodraw_id']:
map_layer.blit(pygame.transform.scale(get_image(world.get(CAM_X + x, CAM_Y + y)), (T_SIZE, T_SIZE)), (X0 + x*T_SIZE, Y0 + y*T_SIZE)) #render block texture
y += 1
x += 1
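# Coordinate mapping used by the render functions above and below: world cell (CAM_X + x, CAM_Y + y)
# is drawn at pixel (X0 + x*T_SIZE, Y0 + y*T_SIZE), so the camera simply offsets which slice of the
# world array is visible.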
def draw_entity(): #entity render
x, y = 0, 0
for x in range(SCR_X):
for y in range(SCR_Y):
try:
unit = ent.get(CAM_X + x, CAM_Y + y)
if unit['name'] != 'ent_noname':
info_layer.blit(Consolas.render(unit['attributes']['name'], False, (255,255,255)), (X0 + x*T_SIZE, Y0 + y*T_SIZE - 20))
if unit['func'] == 'texture_resizable':
map_layer.blit(pygame.transform.scale(image(unit['attributes']['image']), (T_SIZE*unit['attributes']['size_x'], T_SIZE*unit['attributes']['size_y'])),(unit['attributes']['pad_x'] + X0 + x*T_SIZE, unit['attributes']['pad_y'] + Y0 + y*T_SIZE))
pygame.draw.rect(info_layer, (255,255,255), (X0 + x*T_SIZE, Y0 + y*T_SIZE, T_SIZE, T_SIZE), 2)
elif unit['func'] == 'npc':
map_layer.blit(pygame.transform.scale(image(unit['attributes']['skin']), (T_SIZE, T_SIZE)), (X0 + x*T_SIZE, Y0 + y*T_SIZE))
elif unit['func'] == 'snd_ambient':
pygame.draw.circle(info_layer, (0,38,255), (X0 + x*T_SIZE + T_SIZE//2, Y0 + y*T_SIZE + T_SIZE//2), unit['attributes']['radius']*T_SIZE, 2)
map_layer.blit(pygame.transform.scale(get_ent_image(ent.get(CAM_X + x, CAM_Y + y)['id']), (T_SIZE, T_SIZE)), (X0 + x*T_SIZE, Y0 + y*T_SIZE))
info_layer.blit(Consolas.render(unit['attributes']['sound_file'].split('/')[-1], False, (0, 38, 255)), (X0 + x*T_SIZE, Y0 + y*T_SIZE - 40))
elif unit['func'] == 'snd_point':
pygame.draw.circle(info_layer, (0,38,255), (X0 + x*T_SIZE + T_SIZE//2, Y0 + y*T_SIZE + T_SIZE//2), unit['attributes']['radius']*T_SIZE, 2)
map_layer.blit(pygame.transform.scale(get_ent_image(ent.get(CAM_X + x, CAM_Y + y)['id']), (T_SIZE, T_SIZE)), (X0 + x*T_SIZE, Y0 + y*T_SIZE))
info_layer.blit(Consolas.render(unit['attributes']['sound_file'].split('/')[-1], False, (0, 38, 255)), (X0 + x*T_SIZE, Y0 + y*T_SIZE - 40))
else:
map_layer.blit(pygame.transform.scale(get_ent_image(ent.get(CAM_X + x, CAM_Y + y)['id']), (T_SIZE, T_SIZE)), (X0 + x*T_SIZE, Y0 + y*T_SIZE))
except:
continue
y += 1
x += 1
def draw_blk_choose(): #function to draw blocks preview
x, y = 0, 0
for x in range(WIDTH//(T_SIZE_ORIG+5)):
for y in range(HEIGHT//(T_SIZE_ORIG+5)):
#print(x, y)
if blk_arr.get(x, y) != False:
pygame.draw.rect(screen, (50,50,50), (x*(T_SIZE_ORIG+5), 45 + y*(T_SIZE_ORIG+5), 30,30))
screen.blit(get_image(blk_arr.get(x, y)), (x*(T_SIZE_ORIG+5)+5, 45 + y*(T_SIZE_ORIG+5)+5))
if [ax, ay] == [x, y]:
pygame.draw.rect(screen, (200,200,200), (x*(T_SIZE_ORIG+5)+5, 50 + y*(T_SIZE_ORIG+5), 20,20), 2)
m_uni = get_block(m_unit)
pygame.draw.rect(screen, (70,70,70), (mx+8, my+8, len(m_uni['name'] + '({})'.format(m_uni['id']))*setts['font_size']//1.8+2, setts['font_size']+2))
name_text = Consolas.render(m_uni['name'] + '({})'.format(m_uni['id']), False, (255,255,255))
screen.blit(name_text, (mx+10, my+10))
y += 1
x += 1
def draw_ent_choose(): #function to draw entity preview
x, y = 0, 0
for x in range(WIDTH//(T_SIZE_ORIG+5)):
for y in range(HEIGHT//(T_SIZE_ORIG+5)):
if ent_arr.get(x, y)['name'] != 'ent_noname':
pygame.draw.rect(screen, (50,50,50), (x*(T_SIZE_ORIG+5), 45 + y*(T_SIZE_ORIG+5), 30,30))
screen.blit(get_ent_image(ent_arr.get(x, y)['id']), (x*(T_SIZE_ORIG+5)+5, 45 + y*(T_SIZE_ORIG+5)+5))
if [ax, ay] == [x, y]:
pygame.draw.rect(screen, (200,200,200), (x*(T_SIZE_ORIG+5)+5, 50 + y*(T_SIZE_ORIG+5), 20,20), 2)
m_uni = m_unit
pygame.draw.rect(screen, (70,70,70), (mx+8, my+8, len(m_uni['name'] + '({})'.format(m_uni['id']))*setts['font_size']//1.8+2, setts['font_size']+2))
if exist("desc", m_uni):
pygame.draw.rect(screen, (70,70,70), (mx+8, my+24, (len(m_uni['desc']) + 1.5)*setts['font_size']//1.8+2, setts['font_size']+2))
desc_text = Consolas.render(m_uni['desc'], False, (255,255,255))
screen.blit(desc_text, (mx+10, my+10+setts['font_size']))
name_text = Consolas.render(m_uni['name'] + '({})'.format(m_uni['id']), False, (255,255,255))
screen.blit(name_text, (mx+10, my+10))
y += 1
x += 1
def help_window():
root = tkinter.Tk()
root.title(lang['windows']['help'])
root.geometry('600x500')
txt = stxt.ScrolledText(root, width = 70, height = 30, wrap = 'word')
txt.place(x = 10, y = 10)
txt.insert(1.0, TXT_HELP)
txt['state'] = 'disable'
root.mainloop()
#help_window()
def property_editor(unit): #entity property editor
global ax1, ay1, ent
def appl():
for var in ENTRYS:
dic = unit
DATA[var] = globals()[var].get()
#print(DATA[var])
for key, value in DATA.items():
try:
DATA[key] = int(value)
except:
DATA[key] = str(value)
for key, value in DATA.items():
DATA1['_'.join(key.split('_')[2:])] = value
unit2 = copy.deepcopy(unit)
unit2['attributes'] = copy.deepcopy(DATA1)
for a in ENTITY:
if a['pos_x'] == unit['pos_x'] and a['pos_y'] == unit['pos_y'] and a['name'] == unit['name']:
ENTITY.remove(a)
ENTITY.append(unit2)
ent.put(ax1, ay1, unit2)
#print(ent.get(ax1,ay1)['attributes'])
root.destroy()
def default():
attrs = copy.deepcopy(ent_data[unit['name']]['attributes'])
for key, value in attrs.items():
exec('var_entry_{}.delete(0, tkinter.END)'.format(key))
exec('var_entry_{}.insert(0, value)'.format(key))
#print(unit)
dic = {}
attrs = unit['attributes']
#print(attrs)
LEN = 0
TXTS = [0]
DATA = {}
DATA1 = {}
for key, value in attrs.items():
LEN += 1
TXTS.append(len(key))
MAX = max(TXTS)
axx1, ayy1 = ax1, ay1
root = tkinter.Tk()
root.geometry('{}x{}'.format(10 + MAX*11 + 10 + 200 + 30,LEN*40 + 60))
root.title(unit['name'] + ' ({},{})'.format(axx1, ayy1))
root.resizable(width=False, height=False)
if LEN == 0:
root.geometry('300x200')
a = tkinter.Label(text = lang['misc']['ent_noattrs'])
a.place(x = 50, y = 50)
else:
ENTRYS = []
y = 20
for key, value in attrs.items():
wired = False
wired_to = {'pos_x':0, 'pos_y':0, "attr":'none'}
for wire in WIRES:
if wire[0]['pos_x'] == unit['pos_x'] and wire[0]['pos_y'] == unit['pos_y'] and wire[0]['attr'] == key:
wired = True
wired_to = wire
if not wired:
globals()['var_entry_' + key] = tkinter.Entry(width = 40)
exec('var_entry_{}.place(x={},y={})'.format(key, MAX*7 + 20, y))
exec('var_entry_{}.insert(0, value)'.format(key))
else:
globals()['var_wirelabel_{}'.format(key)] = tkinter.Label(root, fg = 'red', text='Wired to {} ({}, {})({})'.format(ent.get(wired_to[1]['pos_x'], wired_to[1]['pos_y'])['attributes']['name'], wired_to[1]['pos_x'], wired_to[1]['pos_y'], wired_to[1]['attr']))
exec('var_wirelabel_{}.place(x = {}, y = {})'.format(key, MAX*7+20, y))
globals()['var_wirelabel_{}_1'.format(key)] = tkinter.Label(root, fg = 'red', text = '({})'.format(ent.get(wired_to[1]['pos_x'], wired_to[1]['pos_y'])['attributes'][wired_to[1]['attr']]))
exec('var_wirelabel_{}_1.place(x = {}, y = {})'.format(key, MAX*7+20, y+15))
globals()['var_label_' + key] = tkinter.Label(text = key + ':')
exec('var_label_{}.place(x={},y={})'.format(key, 10, y))
ENTRYS.append('var_entry_{}'.format(key))
y += 40
if unit['attributes']['name'] == '0':
exec('var_entry_name.delete(0, tkinter.END)')
exec('var_entry_name.insert(0, "{}")'.format('entity_{}'.format(ENT_COUNTER)))
apply = tkinter.Button(root, text = 'Apply', command = appl)
apply.place(x = 10, y = y)
cancel = tkinter.Button(root, text = 'Default', command = default)
cancel.place(x = 60, y = y)
root.mainloop()
def view_vars():
print(pretty_out.box(pretty_out.listing(VARS)))
pygame.init()
pygame.mixer.init()
if setts['fullscreen']:
screen = pygame.display.set_mode((0,0),pygame.RESIZABLE)
else:
screen = pygame.display.set_mode((WIDTH, HEIGHT))
map_layer = pygame.Surface((WIDTH, HEIGHT))
pygame.display.set_caption(setts['TITLE'])
pygame.display.set_icon(image('map_editor/icon.png'))
clock = pygame.time.Clock()
Consolas = pygame.font.Font('map_editor/consolas.ttf', setts['font_size'])
info_layer = pygame.Surface((WIDTH, HEIGHT))
info_layer.set_colorkey(setts['colorkey'])
for key, value in blk_data.items():
if value['image'] != False:
TEXTURES[value['image']] = image(value['image']) #patching textures info
for key, value in ent_data.items():
if value['image'] != False:
ENT_TEXTURES[value['image']] = image(value['image'])
SCR_X = WIDTH//T_SIZE #screen size in blocks
SCR_Y = HEIGHT//T_SIZE
world = worldgen.CaveChunk(W_w, W_h, W_w*W_h*3, 3, 1, 2, 3) #generating default world
blk_arr = arr2.arr2(WIDTH//(T_SIZE_ORIG+5), HEIGHT//(T_SIZE_ORIG+5), False)
ent_arr = arr2.arr2(WIDTH//(T_SIZE_ORIG+5), HEIGHT//(T_SIZE_ORIG+5), ent_data['ent_noname'])
ent = arr2.arr2(W_w, W_h, ent_data['ent_noname'])
EMO = image('map_editor/missing.png') #loading the EMO fallback texture used to replace missing images
x, y = 0, 0
for key, value in blk_data.items():
x += 1
if x == WIDTH // (T_SIZE+10):
x = 0
y += 1
if y == HEIGHT // (T_SIZE+10):
y = 0
break
    blk_arr.put(x, y, value['id']) #loading blocks and entities into 2D arrays for the preview menus
x, y = 0, 0
for key, value in ent_data.items():
x += 1
if x == WIDTH // (T_SIZE+10):
x = 0
y += 1
if y == HEIGHT // (T_SIZE+10):
y = 0
break
ent_arr.put(x, y, value)
running = True
c_loop = True
fg_drawing = False
bg_drawing = False
blk_choose = False #some state variables
ent_choose = False
tool_choose = False
full_view = False
fg_block = 3
bg_block = 2
brackets = image('map_editor/block_br_black.png')
brackets_wrong = image('map_editor/wrong.png')
ax, ay = CAM_X, CAM_Y
ax1, ay1 = ax, ay
mx, my = 0, 0
MOVE = ''
move_right = False
move_left = False #variables to describe camera movement
move_up = False
move_down = False
WMOD = False
LINE_POS1 = [0,0]
LINE_POS2 = [0,0]
line_c = 0
RECT_POS1 = [0,0]
RECT_POS2 = [0,0]
rect_c = 0
C_CENTER = [0,0] #variables to describe primitives drawing (circle, rect and line)
C_RADIUS = 0
C_WIDTH = 1
c_c = 0
C_POS_ORIG = []
T_SIZE_1 = 20
SCR_SCALE_1 = []
move_counter = 0
move_speed = setts['move_speed']
m_unit = 0
fg_ent = False
VARS = {}
ON_WIRE = False
TEST_ARR = [[4,4,4,4],
[4,2,2,4],
[4,2,2,4],
[4,4,4,4]]
full_counter = 0
map_rect = pygame.rect.Rect((0,0, SCR_X*T_SIZE, SCR_Y*T_SIZE))
map_r = pygame.rect.Rect((0,0,WIDTH,HEIGHT-T_SIZE_ORIG*2))
#threading.Thread(target=console).start()
if len(args) > 1:
if os.path.isfile(args[1]):
load_map_by_file(args[1])
#T_SIZE = WIDTH//W_w
#full_view = True
def comp(): #compilation window
def f():
global NAM
NAM = a.get()
root.destroy()
compile_map(NAM)
NAM = 'noname'
root = tkinter.Tk()
root.geometry("300x60")
root.title(lang['windows']['compiler'])
root.resizable(width=False, height=False)
a = tkinter.Entry(root)
a.place(x = 20, y = 21)
b = tkinter.Label(root, text = lang['gui']['map_name'])
b.place(x = 20, y = 0)
c = tkinter.Button(root, command = f, text = lang['gui']['compile'])
c.place(x = 150, y = 20)
root.mainloop()
def set_wire_mode():
    global WMOD
    WMOD = not WMOD
def wire_editor(unit): #wiring menu
global ON_WIRE, ax1, ay1
def on_wire(unit, key):
global ON_WIRE
#print(key) #wiring first entity(input)
ON_WIRE = copy.deepcopy({'pos_x':ax1, 'pos_y':ay1, 'attr':key})
root.destroy()
def on_wire2(unit, key):
global ON_WIRE
#print(key) #wiring second entity(output)
WIRES.append([ON_WIRE, {'pos_x':ax1, 'pos_y':ay1, 'attr':key}])
ON_WIRE = False
root.destroy()
def unwire(wired_to):
WIRES.remove(wired_to)
root.destroy()
if ON_WIRE == False:
dic = {}
attrs = unit['attributes']
#print(attrs)
LEN = 0
TXTS = [0]
DATA = {}
DATA1 = {}
for key, value in attrs.items():
LEN += 1
TXTS.append(len(key))
#print(TXTS)
MAX = max(TXTS)
axx1, ayy1 = ax1, ay1
root = tkinter.Tk()
root.geometry('{}x{}'.format(10 + MAX*11 + 10 + 200 + 30,LEN*40+30))
root.title(unit['name'] + ' ({},{})'.format(axx1, ayy1))
root.resizable(width=False, height=False)
if LEN == 0: #if entitys attributes list is empty, show this message
root.geometry('300x200')
a = tkinter.Label(text = lang['misc']['ent_noattrs'])
a.place(x = 50, y = 50)
else:
BUTTONS = []
y = 20
for key, value in attrs.items():
wired = False
wired_to = {'pos_x':0, 'pos_y':0, "attr":'none'}
for wire in WIRES:
if wire[0]['pos_x'] == unit['pos_x'] and wire[0]['pos_y'] == unit['pos_y'] and wire[0]['attr'] == key:
wired = True
wired_to = wire
if not wired:
globals()['var_button_' + key] = tkinter.Button(width = 30, text = 'Wire (input)',command = partial(on_wire, unit, key))
exec('var_button_{}.place(x={},y={})'.format(key, MAX*7 + 20, y))
else:
globals()['var_button_' + key + '_1'] = tkinter.Button(width = 30,fg = 'red', text = 'Unwire',command = partial(unwire, wired_to))
exec('var_button_{}_1.place(x={},y={})'.format(key, MAX*7 + 20, y))
globals()['var_label_' + key] = tkinter.Label(text = key)
exec('var_label_{}.place(x={},y={})'.format(key, 10, y))
BUTTONS.append('var_button_{}'.format(key))
y += 40
root.mainloop()
else:
dic = {}
attrs = unit['attributes']
#print(attrs)
LEN = 0
TXTS = [0]
DATA = {}
DATA1 = {}
for key, value in attrs.items():
LEN += 1
TXTS.append(len(key))
#print(TXTS)
MAX = max(TXTS)
axx1, ayy1 = ax1, ay1
root = tkinter.Tk()
root.geometry('{}x{}'.format(10 + MAX*11 + 10 + 200 + 30,LEN*40+30))
root.title(unit['name'] + ' ({},{})'.format(axx1, ayy1))
root.resizable(width=False, height=False)
if LEN == 0:
root.geometry('300x200')
a = tkinter.Label(text = lang['gui']['ent_noattrs'])
a.place(x = 50, y = 50)
else:
BUTTONS = []
y = 20
for key, value in attrs.items():
wired = False
wired_to = {'pos_x':0, 'pos_y':0, "attr":'none'}
for wire in WIRES:
if wire[0]['pos_x'] == unit['pos_x'] and wire[0]['pos_y'] == unit['pos_y'] and wire[0]['attr'] == key:
wired = True
wired_to = wire
if not wired:
globals()['var_button_' + key] = tkinter.Button(width = 30, text = 'Wire (output)',command = partial(on_wire2, unit, key))
exec('var_button_{}.place(x={},y={})'.format(key, MAX*7 + 20, y))
else:
globals()['var_button_' + key + '_1'] = tkinter.Button(width = 30, fg = 'red',text = 'Unwire',command = partial(unwire, wired_to))
exec('var_button_{}_1.place(x={},y={})'.format(key, MAX*7 + 20, y))
globals()['var_label_' + key] = tkinter.Label(text = key)
exec('var_label_{}.place(x={},y={})'.format(key, 10, y))
BUTTONS.append('var_button_{}'.format(key))
y += 40
root.mainloop()
def map_menu(): #generator properties window
global world
def f():
global world, W_w, W_h, ent
blk = list(map(int, ent_grass.get().split(',')))
world = worldgen.CaveChunk(int(ent_size.get()), int(ent_size.get()), int(ent_moves.get()), blk[0], blk[1], blk[2], int(ent_smt.get()))
W_w = int(ent_size.get())
W_h = int(ent_size.get())
ent = arr2.arr2(W_w, W_h, ent_data['ent_noname'])
root.destroy()
def g():
global world, W_w, W_h, ent
blk = list(map(int, ent_grass2.get().split(',')))
world = worldgen.TunnelChunk(int(ent_size2.get()), int(ent_size2.get()), int(ent_moves2.get()), int(ent_tun.get()), blk[0], blk[1], blk[2], int(ent_smt2.get()))
W_w = int(ent_size2.get())
W_h = int(ent_size2.get())
ent = arr2.arr2(W_w, W_h, ent_data['ent_noname'])
root.destroy()
def h():
global world, W_w, W_h, ent
world = arr2.arr2(int(ent_size3.get()), int(ent_size3.get()), int(ent_grass3.get()))
W_w = int(ent_size3.get())
W_h = int(ent_size3.get())
ent = arr2.arr2(W_w, W_h, ent_data['ent_noname'])
root.destroy()
root = tkinter.Tk()
root.geometry("300x230")
root.title(lang['windows']['generator'])
root.resizable(width=False, height=False)
tabs = ttk.Notebook(root)
tab_cave = ttk.Frame(tabs)
tabs.add(tab_cave, text = lang['generator']['cavechunk'])
tab_tunnel = ttk.Frame(tabs)
tabs.add(tab_tunnel, text=lang['generator']['tunnelchunk'])
tab_flat = ttk.Frame(tabs)
tabs.add(tab_flat, text=lang['generator']['flat'])
tabs.pack(expand=1, fill="both")
l_size = tkinter.Label(tab_cave, text = lang['generator']['world_size'])
l_size.place(x = 20, y = 10)
ent_size = tkinter.Entry(tab_cave)
ent_size.place(x = 130, y = 10)
l_moves = tkinter.Label(tab_cave, text = lang['generator']['gen_moves'])
l_moves.place(x = 20, y = 40)
ent_moves = tkinter.Entry(tab_cave)
ent_moves.place(x = 130, y = 40)
l_grass = tkinter.Label(tab_cave, text = lang['generator']['gen_blocks'])
l_grass.place(x = 20, y = 70)
ent_grass = tkinter.Entry(tab_cave)
ent_grass.place(x = 130, y = 70)
l_smt= tkinter.Label(tab_cave, text = lang['generator']['smooth'])
l_smt.place(x = 20, y = 100)
ent_smt = tkinter.Entry(tab_cave)
ent_smt.place(x = 130, y = 100)
gen = tkinter.Button(tab_cave, text = lang['generator']['generate'], command = f) #DONT LOOK HERE
gen.place(x = 20, y = 130)
l_size2 = tkinter.Label(tab_tunnel, text = lang['generator']['world_size']) #THIS CODE DOES NOT EXIST
l_size2.place(x = 20, y = 10)
ent_size2 = tkinter.Entry(tab_tunnel)
ent_size2.place(x = 130, y = 10)
l_moves2 = tkinter.Label(tab_tunnel, text = lang['generator']['gen_moves'])
l_moves2.place(x = 20, y = 40)
ent_moves2 = tkinter.Entry(tab_tunnel)
ent_moves2.place(x = 130, y = 40)
l_grass2 = tkinter.Label(tab_tunnel, text = lang['generator']['gen_blocks'])
l_grass2.place(x = 20, y = 70)
ent_grass2 = tkinter.Entry(tab_tunnel)
ent_grass2.place(x = 130, y = 70)
l_tun= tkinter.Label(tab_tunnel, text = lang['generator']['tunnel_num'])
l_tun.place(x = 20, y = 100)
ent_tun = tkinter.Entry(tab_tunnel)
ent_tun.place(x = 130, y = 100)
l_smt2= tkinter.Label(tab_tunnel, text = lang['generator']['smooth'])
l_smt2.place(x = 20, y = 130)
ent_smt2 = tkinter.Entry(tab_tunnel)
ent_smt2.place(x = 130, y = 130)
gen2 = tkinter.Button(tab_tunnel, text = lang['generator']['generate'], command = g)
gen2.place(x = 20, y = 160)
l_size3 = tkinter.Label(tab_flat, text = lang['generator']['world_size'])
l_size3.place(x = 20, y = 10)
ent_size3 = tkinter.Entry(tab_flat)
ent_size3.place(x = 130, y = 10)
l_grass3 = tkinter.Label(tab_flat, text = lang['generator']['fill_block'])
l_grass3.place(x = 20, y = 40)
ent_grass3 = tkinter.Entry(tab_flat)
ent_grass3.place(x = 130, y = 40)
gen3 = tkinter.Button(tab_flat, text = lang['generator']['generate'], command = h)
gen3.place(x = 20, y = 70)
root.mainloop()
gui_layer = pygame.Surface((WIDTH, HEIGHT))
gui_layer.set_colorkey((69,69,69))
gui = GUI(gui_layer, 60, screen)
gui.colorkey = (69,69,69)
gui.draw.button(1, pos = (20,HEIGHT-30), scale=(100,20), size = 15, text = lang['gui']['load_map'], border_width = 2, function = load_map)
gui.draw.button(2, pos = (140,HEIGHT-30), scale=(100,20), size=15, text=lang['gui']['compile'], border_width=2, function = comp)
gui.draw.button(3, pos = (260,HEIGHT-30), scale=(100,20), size=15, text=lang['gui']['worldgen'], border_width=2, function = map_menu)
gui.draw.button(4, pos = (380, HEIGHT-30), scale=(100,20), size=15, text=lang['gui']['wiremode'], border_width=2, function= set_wire_mode)
while running:
clock.tick(setts['FPS'])
move_counter += 1
screen.fill((0,0,0))
info_layer.fill(setts['colorkey'])
SCR_X = WIDTH//T_SIZE
    SCR_Y = HEIGHT//T_SIZE - 2 #recalculating these variables to allow dynamic resizing
map_rect_ = pygame.Rect((0,0,SCR_X*T_SIZE,SCR_Y*T_SIZE))
blk_prefix = get_block(fg_block)['name'].split('_')[0]
if WMOD:
gui.units[4]['fg'] = (255,0,0) #coloring "wiring mode" button
else:
gui.units[4]['fg'] = (0,0,0)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEMOTION:
if map_rect.collidepoint(event.pos):
mx, my = event.pos #defining mouse position
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_a:
move_left = True
elif event.key == pygame.K_d:
move_right = True
elif event.key == pygame.K_w: #camera movement
move_up = True
elif event.key == pygame.K_s:
move_down = True
if event.key == pygame.K_b:
blk_choose = invert(blk_choose)
if event.key == pygame.K_m:
full_view = invert(full_view)
if full_view:
C_POS_ORIG = [CAM_X, CAM_Y]
T_SIZE_1 = T_SIZE
CAM_X, CAM_Y = 0, 0
T_SIZE = min([HEIGHT//W_h, WIDTH//W_w]) #global map mode
SCR_SCALE_1 = [SCR_X, SCR_Y]
if T_SIZE == 0:
T_SIZE += 1
draw()
draw_entity()
elif not full_view:
CAM_X, CAM_Y = C_POS_ORIG
T_SIZE = T_SIZE_1
if event.key == pygame.K_l:
line_c += 1
if line_c == 1:
LINE_POS1 = [ax1, ay1]
elif line_c == 2:
line_c = 0
LINE_POS2 = [ax1, ay1]
points = point_engine.get_line(LINE_POS1[0], LINE_POS1[1], LINE_POS2[0], LINE_POS2[1]) #line
for a in points:
try:
if a[0] >= 0 and a[0] < W_w and a[1] >= 0 and a[1] < W_h:
world.put(a[0], a[1], fg_block)
except:
pass
if event.key == pygame.K_r:
rect_c += 1
if rect_c == 1:
RECT_POS1 = [ax1, ay1]
elif rect_c == 2:
rect_c = 0
RECT_POS2 = [ax1, ay1]
points = point_engine.get_rect(RECT_POS1[0], RECT_POS1[1], RECT_POS2[0], RECT_POS2[1]) #rect
for a in points:
try:
if a[0] >= 0 and a[0] < W_w and a[1] >= 0 and a[1] < W_h:
world.put(a[0], a[1], fg_block)
except:
pass
if event.key == pygame.K_c:
c_c += 1
if c_c == 1:
C_CENTER = [ax1, ay1]
elif c_c == 2:
C_RADIUS = point_engine.way(C_CENTER, (ax1, ay1))
c_c = 0
points = point_engine.get_hollow_circle(C_CENTER[0], C_CENTER[1], C_RADIUS, C_WIDTH) #circle
for a in points:
try:
if a[0] >= 0 and a[0] < W_w and a[1] >= 0 and a[1] < W_h:
world.put(int(a[0]), int(a[1]), fg_block)
except:
pass
if event.key == pygame.K_e:
ent_choose = invert(ent_choose) #switching to entity list
if event.key == pygame.K_DOWN:
fg_block, bg_block = bg_block, fg_block #swapping blocks
if event.key == pygame.K_p:
if ent.get(ax1, ay1)['name'] != 'ent_noname':
try:
if fg_ent['attributes'] == {}:
print()
print(pretty_out.box(pretty_out.listing(ent.get(ax1, ay1)))) #block or entity info output
else:
dic = copy.deepcopy(ent.get(ax1, ay1))
del dic['attributes']
print()
print(pretty_out.box(pretty_out.listing(dic)))
print(lang['misc']['attributes'])
print(pretty_out.box(pretty_out.listing(ent.get(ax1, ay1)['attributes'])))
except:
pass
else:
try:
print()
print(pretty_out.box(pretty_out.listing(get_block(m_unit))))
except:
pass
if event.key == pygame.K_j:
TEST_ARR = worldgen.room(30, 20, walls = [{12:100}, {10:100}, {11:100}, {9:100}], floor = [{8:50}, {13:1}]) #broken room generator
if event.key == pygame.K_INSERT:
world.paste(ax1, ay1, TEST_ARR)
if event.type == pygame.KEYUP:
if event.key == pygame.K_a:
move_left = False
if event.key == pygame.K_d:
move_right = False
if event.key == pygame.K_w: #camera movement
move_up = False
if event.key == pygame.K_s:
move_down = False
if event.type == pygame.MOUSEBUTTONDOWN:
#print(fg_ent)
if event.button == 1 and map_r.collidepoint(event.pos):
if blk_choose:
fg_block = m_unit
fg_ent = False
blk_choose = False
elif ent_choose:
fg_ent = copy.deepcopy(ent_data[m_unit['name']])
ent_choose = False
elif tool_choose:
pass
elif full_view:
C_POS_ORIG = ax, ay
else:
if fg_ent == False:
fg_drawing = True
else:
if ax1 >= 0 and ax1 < W_w and ay1 >= 0 and ay1 < W_h:
ENT_COUNTER += 1
enti = copy.deepcopy(ent_data[fg_ent['name']])
enti['pos_x'] = ax1
enti['pos_y'] = ay1
ent.put(ax1, ay1, enti)
if event.button == 2 and map_r.collidepoint(event.pos):
try:
if ent.get(ax1, ay1)['name'] == 'ent_noname':
fg_block = m_unit
else:
if not WMOD:
property_editor(ent.get(ax1, ay1))
else:
wire_editor(ent.get(ax1, ay1))
#print(WIRES)
except:
pass
if event.button == 3 and map_r.collidepoint(event.pos):
if blk_choose:
bg_block = m_unit
blk_choose = False
elif ent_choose:
pass
elif tool_choose:
pass
elif ON_WIRE != False:
ON_WIRE = False
else:
if fg_ent == False:
bg_drawing = True
else:
if ax1 >= 0 and ax1 < W_w and ay1 >= 0 and ay1 < W_h:
ent.put(ax1, ay1, ent_data['ent_noname'])
if event.button == 4 and map_r.collidepoint(event.pos):
if c_c == 1:
C_WIDTH += 1
else:
if T_SIZE > 1:
T_SIZE -= 1
#X0 = (WIDTH - T_SIZE*SCR_X)
#Y0 = (HEIGHT - T_SIZE*SCR_Y)
if event.button == 5 and map_r.collidepoint(event.pos):
if c_c == 1:
C_WIDTH -= 1
else:
T_SIZE += 1
#X0 = (WIDTH - T_SIZE*SCR_X)
#Y0 = (HEIGHT - T_SIZE*SCR_Y)
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
fg_drawing = False
if event.button == 3:
bg_drawing = False
if blk_choose:
ax, ay = (mx)//(T_SIZE_ORIG+5), (my-50)//(T_SIZE_ORIG+5) #some maths
m_unit = blk_arr.get(ax, ay)
pygame.draw.rect(screen, (255,255,255), (ax,ay,30,30), 2)
elif ent_choose:
ax, ay = (mx)//(T_SIZE_ORIG+5), (my-50)//(T_SIZE_ORIG+5)
m_unit = ent_arr.get(ax, ay)
pygame.draw.rect(screen, (255,255,255), (ax,ay,30,30), 2)
elif tool_choose:
pass
else:
ax, ay = mx//T_SIZE, my//T_SIZE
ax1, ay1 = CAM_X + mx//T_SIZE, CAM_Y + my//T_SIZE
try:
m_unit = world.get(ax1, ay1)
except:
pass
if move_counter*round(T_SIZE_ORIG/T_SIZE, 2) >= move_speed:
move_counter = 0
if move_up:
CAM_Y -= 1
if move_down:#camera movement
CAM_Y += 1
if move_right:
CAM_X += 1
if move_left:
CAM_X -= 1
if fg_drawing:
try:
if ax1 >= 0 and ax1 < W_w and ay1 >= 0 and ay1 < W_h:
world.put(ax1, ay1, fg_block)
except:
pass
if bg_drawing:
try:
if ax1 >= 0 and ax1 < W_w and ay1 >= 0 and ay1 < W_h:
world.put(ax1, ay1, bg_block)
except:
pass
if blk_choose:
screen.fill((128,128,128))
pygame.draw.rect(screen, (64,64,64), (0,0,WIDTH,40))
screen.blit(Consolas.render(lang['misc']['blk_choose'], False, (255,255,255)), (20,15))
draw_blk_choose()
elif ent_choose:
screen.fill((128,128,128))
pygame.draw.rect(screen, (64,64,64), (0,0,WIDTH,40))
screen.blit(Consolas.render(lang['misc']['ent_choose'], False, (255,255,255)), (20,15))
draw_ent_choose()
elif tool_choose:
pass
else:
map_layer.fill((50,50,50))
draw()
draw_entity()
pygame.draw.rect(info_layer, (50,50,50), (WIDTH - 50 - 10, HEIGHT - (HEIGHT-50-10), 45, 35))
info_layer.blit(get_image(bg_block), (WIDTH - 40, HEIGHT - (HEIGHT-70)))
if full_view:
pygame.draw.rect(info_layer, (255,0,0), (C_POS_ORIG[0]*T_SIZE, C_POS_ORIG[1]*T_SIZE, SCR_SCALE_1[0]*T_SIZE, SCR_SCALE_1[1]*T_SIZE), 2)
if bg_drawing:
if not blk_choose and not ent_choose and not tool_choose:
pygame.draw.rect(info_layer, (200,200,200), (WIDTH - 40, HEIGHT - (HEIGHT-70), T_SIZE_ORIG, T_SIZE_ORIG), 2)
info_layer.blit(get_image(fg_block), (WIDTH - 55, HEIGHT - (HEIGHT-65)))
if fg_drawing:
if not blk_choose and not ent_choose and not tool_choose:
pygame.draw.rect(info_layer, (200,200,200), (WIDTH - 55, HEIGHT - (HEIGHT-65), T_SIZE_ORIG, T_SIZE_ORIG), 2)
pygame.draw.rect(info_layer, (30,30,30), (WIDTH - 50 - 10, HEIGHT - (HEIGHT-50-10), 45, 35), 2)
cx, cy = CAM_X + SCR_X//2, CAM_Y + SCR_Y//2
scal = round(T_SIZE_ORIG/T_SIZE, 2)
info_layer.blit(Consolas.render(lang['gui']['cam_pos'].format(cx, cy), False, (255,255,255)), (20,20))
info_layer.blit(Consolas.render(lang['gui']['map_size'].format(W_w, W_h), False, (255,255,255)), (20,35))
info_layer.blit(Consolas.render(lang['gui']['scale'].format(scal), False, (255,255,255)), (20,50))
if fg_ent != False:
info_layer.blit(Consolas.render(lang['gui']['ent_mode'], False, (255,0,0)), (WIDTH//2,35))
if full_view != False:
info_layer.blit(Consolas.render(lang['gui']['full_view'], False, (255,0,0)), (WIDTH//3,35))
if line_c == 1:
pygame.draw.rect(info_layer, (255,0,0), ((LINE_POS1[0] - CAM_X)*T_SIZE, (LINE_POS1[1] - CAM_Y)*T_SIZE, T_SIZE, T_SIZE), 3)
pygame.draw.line(info_layer, (255,255,255), ((LINE_POS1[0] - CAM_X)*T_SIZE + T_SIZE//2, (LINE_POS1[1] - CAM_Y)*T_SIZE + T_SIZE//2),
((ax)*T_SIZE + T_SIZE//2, (ay)*T_SIZE + T_SIZE//2), 2)
pygame.draw.rect(info_layer, (255,0,0), ((ax)*T_SIZE, (ay)*T_SIZE, T_SIZE, T_SIZE), 3)
if rect_c == 1:
delta_x = abs(ax1 - RECT_POS1[0])
delta_y = abs(ay1 - RECT_POS1[1])
x1, y1 = RECT_POS1
x2, y2 = ax1, ay1
tx, ty = (ax1-CAM_X)*T_SIZE, (ay1-CAM_Y)*T_SIZE
if x2 > x1 and y2 > y1:
x1, y1, x2, y2 = x1, y1, x2+1, y2+1
tx, ty = (ax1-CAM_X+1)*T_SIZE, (ay1-CAM_Y+1)*T_SIZE
if x2 > x1 and y2 < y1:
x1, y1, x2, y2 = x1, y2, x2+1, y1
tx, ty = (ax1-CAM_X+1)*T_SIZE, (ay1-CAM_Y)*T_SIZE
if x2 < x1 and y2 < y1:
x1, y1, x2, y2 = x2, y2, x1, y1
tx, ty = (ax1-CAM_X)*T_SIZE, (ay1-CAM_Y-1)*T_SIZE
if y2 > y1 and x2 < x1:
x1, y1, x2, y2 = x2, y1, x1, y2+1
tx, ty = (ax1-CAM_X+1)*T_SIZE, (ay1-CAM_Y-1)*T_SIZE
pygame.draw.rect(info_layer, (255,0,0), ((x1-CAM_X)*T_SIZE, (y1-CAM_Y)*T_SIZE, (x2-x1)*T_SIZE, (y2-y1)*T_SIZE), 3)
info_layer.blit(Consolas.render(f'{delta_x+1},{delta_y+1}', False, (255,0,0)), (tx, ty))
if c_c == 1:
pygame.draw.circle(info_layer, (255,0,0), ((C_CENTER[0] - CAM_X)*T_SIZE, (C_CENTER[1] - CAM_Y)*T_SIZE), 4, 2)
pygame.draw.circle(info_layer, (255,0,0), ((C_CENTER[0] - CAM_X)*T_SIZE, (C_CENTER[1] - CAM_Y)*T_SIZE), C_RADIUS*T_SIZE, 2)
pygame.draw.circle(info_layer, (255,0,0), ((C_CENTER[0] - CAM_X)*T_SIZE, (C_CENTER[1] - CAM_Y)*T_SIZE), (C_RADIUS - C_WIDTH)*T_SIZE, 2)
if ON_WIRE != False:
pygame.draw.line(info_layer, (255,0,0), ((ON_WIRE['pos_x'] - CAM_X)*T_SIZE + T_SIZE//2, (ON_WIRE['pos_y'] - CAM_Y)*T_SIZE + T_SIZE//2), (ax*T_SIZE+T_SIZE//2, ay*T_SIZE+T_SIZE//2), 2)
for wire in WIRES:
pos1 = ((wire[0]['pos_x'] - CAM_X)*T_SIZE + T_SIZE//2, (wire[0]['pos_y'] - CAM_Y)*T_SIZE + T_SIZE//2)
pos2 = ((wire[1]['pos_x'] - CAM_X)*T_SIZE + T_SIZE//2, (wire[1]['pos_y'] - CAM_Y)*T_SIZE + T_SIZE//2)
pygame.draw.line(info_layer, (200,0,0), pos1, pos2, 2)
pygame.draw.circle(info_layer, (200,0,0), pos1, 3)
pygame.draw.circle(info_layer, (200,0,0), pos2, 3)
if ent.get(wire[0]['pos_x'], wire[0]['pos_y'])['name'] == 'ent_noname' or ent.get(wire[1]['pos_x'], wire[1]['pos_y'])['name'] == 'ent_noname':
WIRES.remove(wire) #check if one of wired entitys invalid, then remove the wire
continue
else:
ent_in = copy.deepcopy(ent.get(wire[0]['pos_x'], wire[0]['pos_y']))
ent_out = copy.deepcopy(ent.get(wire[1]['pos_x'], wire[1]['pos_y']))
ent_in['attributes'][wire[0]['attr']] = ent_out['attributes'][wire[1]['attr']]
ent.put(wire[0]['pos_x'], wire[0]['pos_y'], ent_in)
#pygame.draw.rect(info_layer, (255,255,255), (ax*T_SIZE, ay*T_SIZE, get_image(fg_block).get_width(), get_image(fg_block).get_height()), 1)
screen.blit(map_layer, (0,0))
if ax1 >= 0 and ax1 < W_w and ay1 >= 0 and ay1 < W_h:
screen.blit(pygame.transform.scale(brackets, (T_SIZE, T_SIZE)), (ax*T_SIZE, ay*T_SIZE))
else:
screen.blit(pygame.transform.scale(brackets_wrong, (T_SIZE, T_SIZE)), (ax*T_SIZE, ay*T_SIZE))
pygame.draw.rect(info_layer, (255,0,0), ((0-CAM_X)*T_SIZE, (0-CAM_Y)*T_SIZE, W_w*T_SIZE, W_h*T_SIZE), 2)
screen.blit(info_layer, (0,0))
pygame.draw.rect(screen, (64,64,64), (0, HEIGHT-40, WIDTH, 40))
gui.render()
C_RADIUS = point_engine.way(C_CENTER, (ax1, ay1))
map_rect.bottomright = (SCR_X*T_SIZE, SCR_Y*T_SIZE)
pygame.display.flip()
pygame.quit()
|
generic_pubsub.py
|
"""Generic Pub/Sub implementation."""
from collections import deque
from typing import Any, Callable, Deque, Dict, List, Optional, Union
from datetime import datetime
from threading import Thread
from pydantic import PositiveInt
from eepythontools.inheritable_object import InheritableObject
class Publisher(InheritableObject):
subscribers: Dict[str, Dict[str, Deque[Any]]]
public_subscriptions: List[str]
exception_on_buffer_full: bool
def __init__(
self,
*args: Any,
subscribers: Dict[str, Dict[str, Deque[Dict[str, Any]]]] = {},
public_subscriptions: List[str] = ["__public_subscriptions__"],
exception_on_buffer_full: bool = False,
**kwargs: Any
) -> None:
super(Publisher, self).__init__(
self,
*args,
subscribers=subscribers,
public_subscriptions=public_subscriptions,
exception_on_buffer_full=exception_on_buffer_full,
**kwargs
)
self.subscribers = subscribers
self.public_subscriptions = public_subscriptions
self.exception_on_buffer_full = exception_on_buffer_full
def publish(
self,
subscription_name: str,
data: Any,
expiration: Optional[Union[datetime, None]] = None,
public_subscription: bool = True,
) -> None:
full_buffers: List[str] = []
if public_subscription and subscription_name not in self.public_subscriptions:
self.public_subscriptions.append(subscription_name)
for subscriber_name, send_deque in self.subscribers[subscription_name].items():
if expiration is not None and datetime.now() > expiration:
return
send_deque.append({"data": data})
if (
self.exception_on_buffer_full
and send_deque.maxlen is not None
and send_deque.maxlen <= len(send_deque) + 1
):
full_buffers.append(subscriber_name)
if full_buffers:
raise BufferError("Full Buffer(s): {}".format(full_buffers))
async def add_subscriber(
self,
subscription_name: str,
subscriber_name: str,
sender_function: Callable[[Deque[Any]], bool],
buffer_size: PositiveInt = 32,
) -> None:
buffer: Deque[Any] = deque(maxlen=buffer_size)
        new_thread = Thread(target=sender_function, args=(buffer,))  # args must be a tuple, not the bare deque
new_thread.start()
self.subscribers[subscription_name][subscriber_name] = buffer
def remove_subscriber(self, subscription_name: str, subscriber_name: str) -> None:
if subscription_name not in self.subscribers:
raise KeyError(
"{} is not a subscription within this publisher.".format(
subscription_name
)
)
if subscriber_name not in self.subscribers[subscription_name]:
raise KeyError(
"{} is not a subscriber in subscription {} in this publisher.".format(
subscriber_name,
subscription_name,
)
)
self.subscribers[subscription_name][subscriber_name].append(
{"control": "shutdown"}
)
del self.subscribers[subscription_name][subscriber_name]
def remove_subscription(self, subscription_name: str) -> None:
if subscription_name not in self.subscribers:
raise KeyError(
"{} is not a subscription within this publisher.".format(
subscription_name
)
)
for subscriber in self.subscribers[subscription_name]:
self.remove_subscriber(subscription_name, subscriber)
del self.subscribers[subscription_name]
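# Minimal usage sketch (illustrative only; it registers the subscriber deque directly
# instead of going through the async add_subscriber helper, and it assumes
# InheritableObject accepts the constructor call made in __init__ above):
#
#     pub = Publisher(subscribers={"news": {}})
#     inbox: Deque[Any] = deque(maxlen=4)
#     pub.subscribers["news"]["reader"] = inbox
#     pub.publish("news", {"headline": "hello"})
#     inbox.popleft()  # -> {"data": {"headline": "hello"}}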
|
test_functools.py
|
import abc
import builtins
import collections
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import time
import unittest
from weakref import proxy
import contextlib
try:
import threading
except ImportError:
threading = None
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = functools
# pypy: was:
# c_functools = support.import_fresh_module('functools', fresh=['_functools'])
# but this creates confusion for pickle because on pypy, _functools is a
# pure python module, whereas on CPython it is C (and so not really
# re-importable)
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
support.gc_collect()
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(...)' % (name,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
with self.AllowPickle():
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
def test_setstate_subclasses(self):
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
def test_recursive_pickle(self):
with self.AllowPickle():
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_manually_adding_non_string_keyword(self):
p = self.partial(capture)
# Adding a non-string/unicode keyword to partial kwargs
p.keywords[1234] = 'value'
r = repr(p)
self.assertIn('1234', r)
self.assertIn("'value'", r)
with self.assertRaises(TypeError):
p()
def test_keystr_replaces_value(self):
p = self.partial(capture)
class MutatesYourDict(object):
def __str__(self):
p.keywords[self] = ['sth2']
return 'astr'
# Replacing the value during key formatting should keep the original
# value alive (at least long enough).
p.keywords[MutatesYourDict()] = ['sth']
r = repr(p)
self.assertIn('astr', r)
self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
partial = py_functools.partial
class AllowPickle:
def __init__(self):
self._cm = replaced_module("functools", py_functools)
def __enter__(self):
return self._cm.__enter__()
def __exit__(self, type, value, tb):
return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
class PyPartialSubclass(py_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
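# functools.update_wrapper copies the attributes named in WRAPPER_ASSIGNMENTS
# from the wrapped callable onto the wrapper, merges the mappings named in
# WRAPPER_UPDATES (by default only __dict__), and records the original callable
# in __wrapped__.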
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertTrue(getattr(wrapper, name) == getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
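# functools.reduce is provided by the C accelerator module, so these tests are
# skipped when _functools cannot be imported.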
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduce(unittest.TestCase):
if c_functools:
func = c_functools.reduce
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.func(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.func(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.func(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.func(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.func(add, Squares(10)), 285)
self.assertEqual(self.func(add, Squares(10), 0), 285)
self.assertEqual(self.func(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.func)
self.assertRaises(TypeError, self.func, 42, 42)
self.assertRaises(TypeError, self.func, 42, 42, 42)
self.assertEqual(self.func(42, "1"), "1") # func is never called with one item
self.assertEqual(self.func(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.func, 42, (42, 42))
self.assertRaises(TypeError, self.func, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.func, add, "")
self.assertRaises(TypeError, self.func, add, ())
self.assertRaises(TypeError, self.func, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.func, add, TestFailingIter())
self.assertEqual(self.func(add, [], None), None)
self.assertEqual(self.func(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.func, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.func(add, SequenceClass(5)), 10)
self.assertEqual(self.func(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.func, add, SequenceClass(0))
self.assertEqual(self.func(add, SequenceClass(0), 42), 42)
self.assertEqual(self.func(add, SequenceClass(1)), 0)
self.assertEqual(self.func(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.func(add, d), "".join(d.keys()))
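# cmp_to_key wraps an old-style comparison function in a key object usable with
# sorted()/min()/max(); this mix-in is exercised against both the C and
# pure-Python implementations via the subclasses that follow it.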
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
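    # The pure-Python cmp_to_key is a plain function, so wrap it in
    # staticmethod() to keep attribute access from binding it as a method of
    # the test class.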
cmp_to_key = staticmethod(py_functools.cmp_to_key)
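# total_ordering derives the missing rich-comparison methods from __eq__ plus
# any one of __lt__, __le__, __gt__ or __ge__; each test below seeds the class
# with a different operator.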
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
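# TestLRU is a mix-in: it does not define the module attribute itself and is
# run through the TestLRUPy and TestLRUC subclasses below, which bind it to the
# pure-Python and C implementations of functools respectively.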
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# * * * *
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_reentrancy_with_len(self):
# Test to make sure the LRU cache code isn't thrown-off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=-10, currsize=1))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with support.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
                # XXX: Why can these be unequal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with support.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with support.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with support.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_early_detection_of_bad_call(self):
# Issue #22184
with self.assertRaises(TypeError):
@functools.lru_cache
def f():
pass
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
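# Tests for functools.singledispatch: registration, ABC-based dispatch, MRO
# composition, and invalidation of the dispatch cache.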
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
        # Note: in the assert above, dispatch() returns the original base
        # function, which is not the name g -- @singledispatch rebinds g to
        # the dispatching wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, c.OrderedDict]
for haystack in permutations(bases):
m = mro(c.ChainMap, haystack)
self.assertEqual(m, [c.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(c.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [c.defaultdict, dict, c.Sized, c.Container,
object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(c.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
c.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(c.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, c.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(c.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(c.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
_orig_wkd = functools.WeakKeyDictionary
td = TracingDict()
functools.WeakKeyDictionary = lambda: td
c = collections
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
functools.WeakKeyDictionary = _orig_wkd
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
if __name__ == '__main__':
unittest.main()
main_window.py
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import csv
from decimal import Decimal as PyDecimal # Qt 5.12 also exports Decimal
import base64
from functools import partial
from collections import OrderedDict
from typing import List
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash import keystore, get_config
from electroncash.address import Address, ScriptOutput
from electroncash.bitcoin import COIN, TYPE_ADDRESS, TYPE_SCRIPT, MIN_AMOUNT
from electroncash import networks
from electroncash.plugins import run_hook
from electroncash.i18n import _, ngettext
from electroncash.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
ExcessiveFee, UserCancelled, InvalidPassword,
bh2u, bfh, format_fee_satoshis, Weak,
print_error)
import electroncash.web as web
from electroncash import Transaction
from electroncash import util, bitcoin, commands, cashacct
from electroncash import paymentrequest
from electroncash.wallet import Multisig_Wallet, sweep_preparations
from electroncash.contacts import Contact
try:
from electroncash.plot import plot_history
except:
plot_history = None
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, BTCkBEdit, BTCSatsByteEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .popup_widget import ShowPopupLabel, KillPopupLabel, PopupWidget
from . import cashacctqt
from .util import *
class StatusBarButton(QPushButton):
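    '''Small flat icon-only button used in the status bar. Clicks (and the
    Return key) are forwarded to the supplied callback, dropping the unwanted
    PyQt5 "checked" argument along the way.'''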
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(Qt.PointingHandCursor)
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electroncash.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
# Note: self.clean_up_connections automatically detects signals named XXX_signal and disconnects them on window close.
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
history_updated_signal = pyqtSignal()
labels_updated_signal = pyqtSignal() # note this signal occurs when an explicit update_labels() call happens. Interested GUIs should also listen for history_updated_signal as well which also indicates labels may have changed.
on_timer_signal = pyqtSignal() # functions wanting to be executed from timer_actions should connect to this signal, preferably via Qt.DirectConnection
ca_address_default_changed_signal = pyqtSignal(object) # passes cashacct.Info object to slot, which is the new default. Mainly emitted by address_list and address_dialog
    status_icon_dict = dict() # app-global cache of "status_*" -> QIcon instances (for update_status() speedup)
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.wallet = wallet
self.config = config = gui_object.config
assert self.wallet and self.config and self.gui_object
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.op_return_toolong = False
self.internalpluginsdialog = None
self.externalpluginsdialog = None
self.hardwarewalletdialog = None
self.require_fee_update = False
self.cashaddr_toggled_signal = self.gui_object.cashaddr_toggled_signal # alias for backwards compatibility for plugins -- this signal used to live in each window and has since been refactored to gui-object where it belongs (since it's really an app-global setting)
self.force_use_single_change_addr = None # this is set by the CashShuffle plugin to a single string that will go into the tool-tip explaining why this preference option is disabled (see self.settings_dialog)
self.tl_windows = []
self.tx_external_keypairs = {}
self._tx_dialogs = Weak.Set()
self.tx_update_mgr = TxUpdateMgr(self) # manages network callbacks for 'new_transaction' and 'verified2', and collates GUI updates from said callbacks as a performance optimization
self.is_schnorr_enabled = self.wallet.is_schnorr_enabled # This is a function -- Support for plugins that may be using the 4.0.3 & 4.0.4 API -- this function used to live in this class, before being moved to Abstract_Wallet.
self.send_tab_opreturn_widgets, self.receive_tab_opreturn_widgets = [], [] # defaults to empty list
self._shortcuts = Weak.Set() # keep track of shortcuts and disable them on close
self.create_status_bar()
self.need_update = threading.Event()
self.labels_need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
# clears/inits the opreturn widgets
self.on_toggled_opreturn(bool(self.config.get('enable_opreturn')))
def add_optional_tab(tabs, tab, icon, description, name, default=False):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), default):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.init_menubar()
wrtabs = Weak.ref(tabs) # We use a weak reference here to help along python gc of QShortcut children: prevent the lambdas below from holding a strong ref to self.
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+W"), self, self.close) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+Q"), self, self.close) )
        # Below is now added to the menu as Ctrl+R, but we'll also support F5 like browsers do
self._shortcuts.add( QShortcut(QKeySequence("F5"), self, self.update_wallet) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs() and wrtabs().setCurrentIndex((wrtabs().currentIndex() - 1)%wrtabs().count())) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs() and wrtabs().setCurrentIndex((wrtabs().currentIndex() + 1)%wrtabs().count())) )
for i in range(tabs.count()):
self._shortcuts.add( QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs() and wrtabs().setCurrentIndex(i)) )
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.gui_object.update_available_signal.connect(self.on_update_available) # shows/hides the update_available_button, emitted by update check mechanism when a new version is available
self.history_list.setFocus(True)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['blockchain_updated', 'wallet_updated',
'new_transaction', 'status', 'banner', 'verified2',
'fee', 'ca_verified_tx', 'ca_verification_failed']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
_first_shown = True
def showEvent(self, event):
super().showEvent(event)
if event.isAccepted() and self._first_shown:
self._first_shown = False
weakSelf = Weak.ref(self)
#
#try:
# # Amaury's recommendation -- only remind a subset of users to enable it.
# self.remind_cashshuffle_enabled = bool(int.from_bytes(bytes.fromhex(self.wallet.get_public_key(self.wallet.get_addresses()[0])), byteorder='big') & 0x3)
#except (AttributeError, ValueError, TypeError):
# # wallet lacks the get_public_key method
# self.remind_cashshuffle_enabled = False
self.remind_cashshuffle_enabled = False # For now globally disabled
#QTimer.singleShot(300, lambda: weakSelf() and weakSelf().do_cash_shuffle_reminder())
#
# do this immediately after this event handler finishes -- noop on everything but linux
QTimer.singleShot(0, lambda: weakSelf() and weakSelf().gui_object.lin_win_maybe_show_highdpi_caveat_msg(weakSelf()))
def on_history(self, event, *args):
# NB: event should always be 'on_history'
if not args or args[0] is self.wallet:
self.new_fx_history_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_history(self):
if self.cleaned_up: return
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_quotes(self):
if self.cleaned_up: return
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def toggle_tab(self, tab):
show = self.tabs.indexOf(tab) == -1
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_format = _("Hide {tab_description}") if show else _("Show {tab_description}")
item_text = item_format.format(tab_description=tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self), self.wallet.basename())
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
try:
traceback.print_exception(*exc_info)
except OSError:
# Issue #662, user got IO error.
# We want them to still get the error displayed to them.
pass
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
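        # NB: this callback runs on the Network thread. Do not touch GUI objects here;
        # GUI work is deferred by re-emitting self.network_signal (handled in on_network_qt).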
#self.print_error("on_network:", event, *args)
if event == 'wallet_updated':
if args[0] is self.wallet:
self.need_update.set()
elif event == 'blockchain_updated':
self.need_update.set()
elif event == 'new_transaction':
self.tx_update_mgr.notif_add(args) # added only if this wallet's tx
if args[1] is self.wallet:
self.network_signal.emit(event, args)
elif event == 'verified2':
self.tx_update_mgr.verif_add(args) # added only if this wallet's tx
if args[0] is self.wallet:
self.network_signal.emit(event, args)
elif event in ['status', 'banner', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
elif event in ('ca_verified_tx', 'ca_verification_failed'):
if args[0] is self.wallet.cashacct:
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
if self.cleaned_up: return
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'fee':
pass
elif event == 'new_transaction':
self.check_and_reset_receive_address_if_needed()
elif event in ('ca_verified_tx', 'ca_verification_failed'):
pass
elif event == 'verified2':
pass
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
            t.daemon = True
t.start()
def _close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error, name = wallet.diagnostic_name() + '/Wallet')
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.tray.isVisible():
self.hide()
else:
self.show()
if self._is_invalid_testnet_wallet():
self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
self._rebuild_history_action.setEnabled(False)
self._warn_if_invalid_testnet_wallet()
self.watching_only_changed()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
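        # Only restore the saved geometry if its rectangle still fits entirely on the current screen.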
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = '%s %s - %s' % (networks.net.TITLE,
self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Vitae with it."),
_("Make sure you own the seed phrase or the private keys, before you request Vitae to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def _is_invalid_testnet_wallet(self):
if not networks.net.TESTNET:
return False
is_old_bad = False
xkey = ((hasattr(self.wallet, 'get_master_public_key') and self.wallet.get_master_public_key())
or None)
if xkey:
from electroncash.bitcoin import deserialize_xpub, InvalidXKeyFormat
try:
xp = deserialize_xpub(xkey)
except InvalidXKeyFormat:
is_old_bad = True
return is_old_bad
def _warn_if_invalid_testnet_wallet(self):
''' This was added after the upgrade from the bad xpub testnet wallets
to the good tpub testnet wallet format in version 3.3.6. See #1164.
We warn users if they are using the bad wallet format and instruct
them on how to upgrade their wallets.'''
is_old_bad = self._is_invalid_testnet_wallet()
if is_old_bad:
msg = ' '.join([
_("This testnet wallet has an invalid master key format."),
_("(Old versions of ViLight before 3.3.6 produced invalid testnet wallets)."),
'<br><br>',
_("In order to use this wallet without errors with this version of EC, please <b>re-generate this wallet from seed</b>."),
"<br><br><em><i>~SPV stopped~</i></em>"
])
self.show_critical(msg, title=_('Invalid Master Key'), rich_text=True)
return is_old_bad
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
if not os.path.exists(wallet_folder):
wallet_folder = None
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
if filename.lower().endswith('.txn'):
# they did File -> Open on a .txn, just do that.
self.do_process_from_file(fileName=filename)
return
self.gui_object.new_window(filename)
def backup_wallet(self):
self.wallet.storage.write() # make sure file is committed to disk
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
# Copy file contents
shutil.copyfile(path, new_path)
# Copy file attributes if possible
# (not supported on targets like Flatpak documents)
try:
shutil.copystat(path, new_path)
except (IOError, os.error):
pass
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("ViLight was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
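        # Sanity-check the stored value: it must be a sortable list, otherwise start from scratch.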
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent2 = []
for k in recent:
if os.path.exists(k):
recent2.append(k)
recent = recent2[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
gui_object = self.gui_object
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return self.gui_object.get_wallet_folder()
def new_wallet(self):
try:
full_path = self.gui_object.get_new_wallet_path()
except FileNotFoundError as e:
self.show_error(str(e))
return
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = self.menuBar()
menubar.setObjectName(self.diagnostic_name() + ".QMenuBar")
destroyed_print_error(menubar)
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys, QKeySequence("Ctrl+I"))
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
self._rebuild_history_action = wallet_menu.addAction(_("&Rebuild history"), self.rebuild_history)
self._scan_beyond_gap_action = wallet_menu.addAction(_("&Scan beyond gap..."), self.scan_beyond_gap)
self._scan_beyond_gap_action.setEnabled(bool(self.wallet.is_deterministic() and self.network))
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
#hist_menu.addAction(_("Plot"), self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction(_("Export"), self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search, QKeySequence("Ctrl+F"))
wallet_menu.addAction(_("&Refresh GUI"), self.update_wallet, QKeySequence("Ctrl+R"))
def add_toggle_action(view_menu, tab):
is_shown = self.tabs.indexOf(tab) > -1
item_format = _("Hide {tab_description}") if is_shown else _("Show {tab_description}")
item_name = item_format.format(tab_description=tab.tab_description)
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are all reserved keywords in OSX using this as work around
prefs_tit = _("ViLight preferences") if sys.platform == 'darwin' else _("Preferences")
tools_menu.addAction(prefs_tit, self.settings_dialog, QKeySequence("Ctrl+,") )
gui_object = self.gui_object
weakSelf = Weak.ref(self)
tools_menu.addAction(_("&Network"), lambda: gui_object.show_network_dialog(weakSelf()), QKeySequence("Ctrl+K"))
tools_menu.addAction(_("Optional &Features"), self.internal_plugins_dialog, QKeySequence("Shift+Ctrl+P"))
tools_menu.addAction(_("Installed &Plugins"), self.external_plugins_dialog, QKeySequence("Ctrl+P"))
if sys.platform.startswith('linux'):
tools_menu.addSeparator()
tools_menu.addAction(_("&Hardware wallet support..."), self.hardware_wallet_support)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany, QKeySequence("Ctrl+M"))
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("From &file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("From &text"), self.do_process_from_text, QKeySequence("Ctrl+T"))
raw_transaction_menu.addAction(_("From the &blockchain"), self.do_process_from_txid, QKeySequence("Ctrl+B"))
raw_transaction_menu.addAction(_("From &QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
tools_menu.addSeparator()
if ColorScheme.dark_scheme and sys.platform != 'darwin': # use dark icon in menu except for on macOS where we can't be sure it will look right due to the way menus work on macOS
icon = QIcon(":icons/cashacct-button-darkmode.png")
else:
icon = QIcon(":icons/cashacct-logo.png")
# tools_menu.addAction(icon, _("Lookup &Cash Account..."), self.lookup_cash_account_dialog, QKeySequence("Ctrl+L"))
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("About Qt"), self.app.aboutQt)
help_menu.addAction(_("&Check for updates..."), lambda: self.gui_object.show_update_checker(self))
help_menu.addAction(_("&Official website"), lambda: webopen("https://vitae.cc"))
help_menu.addSeparator()
# help_menu.addAction(_("Documentation"), lambda: webopen("http://electroncash.readthedocs.io/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
# The message is intentionally untranslated, leave it like that
self.pay_to_URI('{}:{}?message=donation for {}'
.format(networks.net.CASHADDR_PREFIX, d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "ViLight",
"<p><font size=+3><b>ViLight</b></font></p><p>" + _("Version") + f" {self.wallet.electrum_version}" + "</p>" +
'<p><span style="font-size:11pt; font-weight:500;">' + "Copyright © 2019<br>The Vitae Developers" + "</span></p>" +
'<p><span style="font-size:11pt; font-weight:500;">' + "Copyright © 2017-2019<br>Electron Cash LLC & The Electron Cash Developers" + "</span></p>" +
'<p><span style="font-weight:200;">' +
_("ViLight's focus is speed, with low resource usage and simplifying Vitae. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Vitae system.") +
"</span></p>"
)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/VitaeTeam/ViLight/issues\">https://github.com/VitaeTeam/ViLight/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of ViLight (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="ViLight - " + _("Reporting Bugs"), rich_text = True)
def notify(self, message):
self.gui_object.notify(message)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
return __class__.static_getOpenFileName(title=title, filter=filter, config=self.config, parent=self)
def getSaveFileName(self, title, filename, filter = ""):
return __class__.static_getSaveFileName(title=title, filename=filename, filter=filter, config=self.config, parent=self)
@staticmethod
def static_getOpenFileName(*, title, parent=None, config=None, filter=""):
if not config:
config = get_config()
userdir = os.path.expanduser('~')
directory = config.get('io_dir', userdir) if config else userdir
fileName, __ = QFileDialog.getOpenFileName(parent, title, directory, filter)
if fileName and directory != os.path.dirname(fileName) and config:
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
@staticmethod
def static_getSaveFileName(*, title, filename, parent=None, config=None, filter=""):
if not config:
config = get_config()
userdir = os.path.expanduser('~')
directory = config.get('io_dir', userdir) if config else userdir
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(parent, title, path, filter)
if fileName and directory != os.path.dirname(fileName) and config:
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self._update_wallet() # will clear flag when it runs. (also clears labels_need_update as well)
if self.labels_need_update.is_set():
self._update_labels() # will clear flag when it runs.
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
# hook for other classes to be called here. For example the tx_update_mgr is called here (see TxUpdateMgr.do_check).
self.on_timer_signal.emit()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount, is_diff=False):
text = self.format_amount(amount, is_diff=is_diff) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount, is_diff=is_diff)
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
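        # fee_rate is expressed in sat/kB here; dividing by 1000 yields the sat/byte figure shown to the user.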
sats_per_byte = format_fee_satoshis(fee_rate/1000, max(self.num_zeros, 1))
return _('{sats_per_byte} sat/byte').format(sats_per_byte=sats_per_byte)
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
if self.decimal_point in util.inv_base_units:
return util.inv_base_units[self.decimal_point]
raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
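            # The 'follows' flag prevents feedback loops: when one field is updated
            # programmatically from the other below, its own change handler bails out here.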
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / PyDecimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * PyDecimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
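    # Class-level cache of status-bar tooltip strings, populated lazily by update_status().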
_network_status_tip_dict = dict()
def update_status(self):
if not self.wallet:
return
icon_dict = ElectrumWindow.status_icon_dict
if not icon_dict:
# cache the icons to save on CPU overhead per update_status call
icon_dict.update({
"status_disconnected" : QIcon(":icons/status_disconnected.svg"),
"status_waiting" : QIcon(":icons/status_waiting.svg"),
"status_lagging" : QIcon(":icons/status_lagging.svg"),
"status_lagging_fork" : QIcon(":icons/status_lagging_fork.svg"),
"status_connected" : QIcon(":icons/status_connected.svg"),
"status_connected_fork" : QIcon(":icons/status_connected_fork.svg"),
"status_connected_proxy" : QIcon(":icons/status_connected_proxy.svg"),
"status_connected_proxy_fork" : QIcon(":icons/status_connected_proxy_fork.svg"),
})
status_tip_dict = ElectrumWindow._network_status_tip_dict
if not status_tip_dict:
# Since we're caching stuff, might as well cache this too
status_tip_dict.update({
"status_disconnected" : _('Network Status') + " - " + _("Offline"),
"status_waiting" : _('Network Status') + " - " + _("Updating..."),
"status_lagging" : _('Network Status') + " - " + '',
"status_lagging_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
"status_connected" : _('Network Status') + " - " + _("Connected"),
"status_connected_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
"status_connected_proxy" : _('Network Status') + " - " + _("Connected via proxy"),
"status_connected_proxy_fork" : _('Network Status') + " - " + _("Connected via proxy") + "; " + _("Chain fork(s) detected"),
})
status_tip = ''
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = icon_dict["status_disconnected"]
status_tip = status_tip_dict['status_disconnected']
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
num_chains = len(self.network.get_blockchains())
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = icon_dict["status_waiting"]
status_tip = status_tip_dict["status_waiting"]
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
if num_chains <= 1:
icon = icon_dict["status_lagging"]
status_tip = status_tip_dict["status_lagging"] + text
else:
icon = icon_dict["status_lagging_fork"]
status_tip = status_tip_dict["status_lagging_fork"] + "; " + text
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
extra = run_hook("balance_label_extra", self)
if isinstance(extra, str) and extra:
text += " [{}]".format(extra)
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
n_unverif = self.wallet.get_unverified_tx_pending_count()
if n_unverif >= 10:
# if there are lots left to verify, display this informative text
text += " " + ( _("[%d unverified TXs]") % n_unverif )
if not self.network.proxy:
icon = icon_dict["status_connected"] if num_chains <= 1 else icon_dict["status_connected_fork"]
status_tip = status_tip_dict["status_connected"] if num_chains <= 1 else status_tip_dict["status_connected_fork"]
else:
icon = icon_dict["status_connected_proxy"] if num_chains <= 1 else icon_dict["status_connected_proxy_fork"]
status_tip = status_tip_dict["status_connected_proxy"] if num_chains <= 1 else status_tip_dict["status_connected_proxy_fork"]
else:
text = _("Not connected")
icon = icon_dict["status_disconnected"]
status_tip = status_tip_dict["status_disconnected"]
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
self.status_button.setStatusTip( status_tip )
self.update_cashshuffle_icon()
def update_wallet(self):
self.need_update.set() # will enqueue an _update_wallet() call in at most 0.5 seconds from now.
def _update_wallet(self):
''' Called by self.timer_actions every 0.5 secs if need_update flag is set.
Note that the flag is actually cleared by update_tabs.'''
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
@rate_limited(1.0, classlevel=True, ts_after=True) # Limit tab updates to no more than 1 per second, app-wide. Multiple calls across instances will be collated into 1 deferred series of calls (1 call per extant instance)
def update_tabs(self):
if self.cleaned_up: return
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history, also clears self.tx_update_mgr.verif_q
self.need_update.clear() # clear flag
if self.labels_need_update.is_set():
# if flag was set, might as well declare the labels updated since they necessarily were due to a full update.
self.labels_updated_signal.emit() # just in case client code was waiting for this signal to proceed.
self.labels_need_update.clear() # clear flag
def update_labels(self):
self.labels_need_update.set() # will enqueue an _update_labels() call in at most 0.5 seconds from now
@rate_limited(1.0)
def _update_labels(self):
''' Called by self.timer_actions every 0.5 secs if labels_need_update flag is set. '''
if self.cleaned_up: return
self.history_list.update_labels()
self.address_list.update_labels()
self.utxo_list.update_labels()
self.update_completions()
self.labels_updated_signal.emit()
self.labels_need_update.clear() # clear flag
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr, *, parent=None):
parent = parent or self.top_level_window()
from . import address_dialog
d = address_dialog.AddressDialog(self, addr, windowParent=parent)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
d = show_transaction(tx, self, tx_desc)
self._tx_dialogs.add(d)
def on_toggled_opreturn(self, b):
''' toggles opreturn-related widgets for both the receive and send
tabs'''
b = bool(b)
self.config.set_key('enable_opreturn', b)
# send tab
if not b:
self.message_opreturn_e.setText("")
self.op_return_toolong = False
for x in self.send_tab_opreturn_widgets:
x.setVisible(b)
# receive tab
for x in self.receive_tab_opreturn_widgets:
x.setVisible(b)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address = None
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton()
self.receive_address_e.setReadOnly(True)
msg = _('Vitae address where the payment should be received. Note that each payment request uses a different Vitae address.')
label = HelpLabel(_('&Receiving address'), msg)
label.setBuddy(self.receive_address_e)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.gui_object.cashaddr_toggled_signal.connect(self.update_receive_address_widget)
grid.addWidget(label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
# Cash Account for this address (if any)
self.receive_message_e = QLineEdit()
label = QLabel(_('&Description'))
label.setBuddy(self.receive_message_e)
grid.addWidget(label, 2, 0)
grid.addWidget(self.receive_message_e, 2, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
# OP_RETURN requests
self.receive_opreturn_e = QLineEdit()
msg = _("You may optionally append an OP_RETURN message to the payment URI and/or QR you generate.\n\nNote: Not all wallets yet support OP_RETURN parameters, so make sure the other party's wallet supports OP_RETURN URIs.")
self.receive_opreturn_label = label = HelpLabel(_('&OP_RETURN'), msg)
label.setBuddy(self.receive_opreturn_e)
self.receive_opreturn_rawhex_cb = QCheckBox(_('Raw &hex script'))
self.receive_opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
grid.addWidget(label, 3, 0)
grid.addWidget(self.receive_opreturn_e, 3, 1, 1, 3)
grid.addWidget(self.receive_opreturn_rawhex_cb, 3, 4, Qt.AlignLeft)
self.receive_opreturn_e.textChanged.connect(self.update_receive_qr)
self.receive_opreturn_rawhex_cb.clicked.connect(self.update_receive_qr)
self.receive_tab_opreturn_widgets = [
self.receive_opreturn_e,
self.receive_opreturn_rawhex_cb,
self.receive_opreturn_label,
]
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
label = QLabel(_('Requested &amount'))
label.setBuddy(self.receive_amount_e)
grid.addWidget(label, 4, 0)
grid.addWidget(self.receive_amount_e, 4, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 4, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Vitae addresses.'),
_('The Vitae address never expires and will always be part of this ViLight wallet.'),
])
label = HelpLabel(_('Request &expires'), msg)
label.setBuddy(self.expires_combo)
grid.addWidget(label, 5, 0)
grid.addWidget(self.expires_combo, 5, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.hide()
grid.addWidget(self.expires_label, 5, 1)
self.save_request_button = QPushButton(_('&Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('&Clear'))
self.new_request_button.clicked.connect(self.new_payment_request)
weakSelf = Weak.ref(self)
class MyQRCodeWidget(QRCodeWidget):
def mouseReleaseEvent(slf, e):
''' to make the QRWidget clickable '''
weakSelf() and weakSelf().show_qr_window()
self.receive_qr = MyQRCodeWidget(fixedSize=200)
self.receive_qr.setCursor(QCursor(Qt.PointingHandCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
buttons.addStretch(1)
grid.addLayout(buttons, 6, 2, 1, -1)
self.receive_requests_label = QLabel(_('Re&quests'))
from .request_list import RequestList
self.request_list = RequestList(self)
self.request_list.chkVisible()
self.receive_requests_label.setBuddy(self.request_list)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
vbox2 = QVBoxLayout()
vbox2.setContentsMargins(0,0,0,0)
vbox2.setSpacing(4)
vbox2.addWidget(self.receive_qr, Qt.AlignHCenter|Qt.AlignTop)
self.receive_qr.setToolTip(_('Receive request QR code (click for details)'))
but = uribut = QPushButton(_('Copy &URI'))
def on_copy_uri():
if self.receive_qr.data:
uri = str(self.receive_qr.data)
self.copy_to_clipboard(uri, _('Receive request URI copied to clipboard'), uribut)
but.clicked.connect(on_copy_uri)
but.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
but.setToolTip(_('Click to copy the receive request URI to the clipboard'))
vbox2.addWidget(but)
vbox2.setAlignment(but, Qt.AlignHCenter|Qt.AlignVCenter)
hbox.addLayout(vbox2)
class ReceiveTab(QWidget):
def showEvent(slf, e):
super().showEvent(e)
if e.isAccepted():
wslf = weakSelf()
if wslf:
wslf.check_and_reset_receive_address_if_needed()
w = ReceiveTab()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.address_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr.to_storage_string(), '')
amount = req['amount']
op_return = req.get('op_return')
op_return_raw = req.get('op_return_raw') if not op_return else None
URI = web.create_URI(addr, amount, message, op_return=op_return, op_return_raw=op_return_raw)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
        if not self.receive_address:
            self.show_error(_('No receiving address'))
            return
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
kwargs = {}
opr = self.receive_opreturn_e.text().strip()
if opr:
# save op_return, if any
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
kwargs[arg] = opr
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration, **kwargs)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(self.receive_address)
self.request_list.update()
self.request_list.select_item_by_address(req.get('address')) # when adding items to the view the current selection may not reflect what's in the UI. Make sure it's selected.
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self.top_level_window(), title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests[addr]
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address(frozen_ok=False)
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
# New! Since the button is called 'Clear' now, we let them proceed with a re-used address
addr = self.wallet.get_receiving_address()
else:
# Warn if past gap limit.
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None) # We want the current item to always reflect what's in the UI. So if new, clear selection.
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address = addr
self.receive_message_e.setText('')
self.receive_opreturn_rawhex_cb.setChecked(False)
self.receive_opreturn_e.setText('')
self.receive_amount_e.setAmount(None)
self.update_receive_address_widget()
def update_receive_address_widget(self):
text = ''
if self.receive_address:
text = self.receive_address.to_full_ui_string()
self.receive_address_e.setText(text)
@rate_limited(0.250, ts_after=True) # this function potentially re-computes the QR widget, so it's rate limited to once every 250ms
def check_and_reset_receive_address_if_needed(self):
''' Check to make sure the receive tab is kosher and doesn't contain
an already-used address. This should be called from the showEvent
for the tab. '''
if not self.wallet.use_change or self.cleaned_up:
# if they don't care about change addresses, they are ok
# with re-using addresses, so skip this check.
return
# ok, they care about anonymity, so make sure the receive address
# is always an unused address.
if (not self.receive_address # this should always be defined but check anyway
or self.receive_address in self.wallet.frozen_addresses # make sure it's not frozen
or (self.wallet.get_address_history(self.receive_address) # make a new address if it has a history
and not self.wallet.get_payment_request(self.receive_address, self.config))): # and if they aren't actively editing one in the request_list widget
addr = self.wallet.get_unused_address(frozen_ok=False) # try unused, not frozen
if addr is None:
if self.wallet.is_deterministic():
                    # create a new one if deterministic
addr = self.wallet.create_new_address(False)
else:
# otherwise give up and just re-use one.
addr = self.wallet.get_receiving_address()
self.receive_address = addr
self.update_receive_address_widget()
def clear_receive_tab(self):
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None)
self.set_receive_address(self.wallet.get_receiving_address(frozen_ok=False))
def show_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window()
self.qr_window.setAttribute(Qt.WA_DeleteOnClose, True)
weakSelf = Weak.ref(self)
def destroyed_clean(x):
if weakSelf():
weakSelf().qr_window = None
weakSelf().print_error("QR Window destroyed.")
self.qr_window.destroyed.connect(destroyed_clean)
self.update_receive_qr()
if self.qr_window.isMinimized():
self.qr_window.showNormal()
else:
self.qr_window.show()
self.qr_window.raise_()
self.qr_window.activateWindow()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
self.receive_address = addr
self.show_receive_tab()
self.update_receive_address_widget()
def update_receive_qr(self):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
kwargs = {}
if self.receive_opreturn_e.isVisible():
# set op_return if enabled
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
opret = self.receive_opreturn_e.text()
if opret:
kwargs[arg] = opret
# Special case hack -- see #1473. Omit vitae: prefix from
# legacy address if no other params present in receive request.
if Address.FMT_UI == Address.FMT_LEGACY and not kwargs and not amount and not message:
uri = self.receive_address.to_ui_string()
else:
# Otherwise proceed as normal, prepending vitae: to URI
uri = web.create_URI(self.receive_address, amount, message, **kwargs)
self.receive_qr.setData(uri)
if self.qr_window:
self.qr_window.set_content(self, self.receive_address_e.text(), amount,
message, uri, **kwargs)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
# NB: the translators hopefully will not have too tough a time with this
# *fingers crossed* :)
msg = "<span style=\"font-weight:400;\">" + _('Recipient of the funds.') + " " + \
_("You may enter:"
"<ul>"
"<li> Vitae <b>Address</b> <b>★</b>"
"<li> <b>Vitae ID</b> <b>★</b> e.g. <i>satoshi#123</i>"
"<li> <b>Contact name</b> <b>★</b> from the Contacts tab"
"<li> <b>CoinText</b> e.g. <i>cointext:+1234567</i>"
"<li> <b>OpenAlias</b> e.g. <i>[email protected]</i>"
"</ul><br>"
" <b>★</b> = Supports <b>pay-to-many</b>, where"
" you may optionally enter multiple lines of the form:"
"</span><br><pre>"
" recipient1, amount1 \n"
" recipient2, amount2 \n"
" etc..."
"</pre>")
self.payto_label = payto_label = HelpLabel(_('Pay &to'), msg)
payto_label.setBuddy(self.payto_e)
qmark = ":icons/question-mark-dark.svg" if ColorScheme.dark_scheme else ":icons/question-mark-light.svg"
self.payto_e.addButton(icon_name = qmark, on_click = payto_label.show_help,
tooltip = _('Show help'), index = 0)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter(self.payto_e)
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('&Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
description_label.setBuddy(self.message_e)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg_opreturn = ( _('OP_RETURN data (optional).') + '\n\n'
+ _('Posts a PERMANENT note to the VITAE blockchain as part of this transaction.')
+ '\n\n' + _('If you specify OP_RETURN text, you may leave the \'Pay to\' field blank.') )
self.opreturn_label = HelpLabel(_('&OP_RETURN'), msg_opreturn)
grid.addWidget(self.opreturn_label, 3, 0)
self.message_opreturn_e = MyLineEdit()
self.opreturn_label.setBuddy(self.message_opreturn_e)
hbox = QHBoxLayout()
hbox.addWidget(self.message_opreturn_e)
self.opreturn_rawhex_cb = QCheckBox(_('&Raw hex script'))
self.opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
hbox.addWidget(self.opreturn_rawhex_cb)
grid.addLayout(hbox, 3 , 1, 1, -1)
self.send_tab_opreturn_widgets = [
self.message_opreturn_e,
self.opreturn_rawhex_cb,
self.opreturn_label,
]
self.from_label = QLabel(_('&From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_label.setBuddy(self.from_list)
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('&Amount'), msg)
amount_label.setBuddy(self.amount_e)
grid.addWidget(amount_label, 5, 0)
grid.addWidget(self.amount_e, 5, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 5, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("&Max"), self.spend_max)
self.max_button.setFixedWidth(140)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 5, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 5, 4)
msg = _('Vitae transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('F&ee'), msg)
def fee_cb(dyn, pos, fee_rate):
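            # dyn=True: the slider position selects a dynamic fee level ('fee_level');
            # otherwise the slider supplies an absolute rate stored as 'fee_per_kb'.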
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_e_label.setBuddy(self.fee_slider)
self.fee_slider.setFixedWidth(140)
self.fee_custom_lbl = HelpLabel(self.get_custom_fee_text(),
_('This is the fee rate that will be used for this transaction.')
+ "\n\n" + _('It is calculated from the Custom Fee Rate in preferences, but can be overridden from the manual fee edit on this form (if enabled).')
+ "\n\n" + _('Generally, a fee of 1.0 sats/B is a good minimal rate to ensure your transaction will make it into the next block.'))
self.fee_custom_lbl.setFixedWidth(140)
self.fee_slider_mogrifier()
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
grid.addWidget(self.fee_e_label, 6, 0)
grid.addWidget(self.fee_slider, 6, 1)
grid.addWidget(self.fee_custom_lbl, 6, 1)
grid.addWidget(self.fee_e, 6, 2)
self.preview_button = EnterButton(_("&Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transactions before signing it.'))
self.send_button = EnterButton(_("&Send"), self.do_send)
self.cointext_button = EnterButton(_("Coin&Text"), self.do_cointext)
self.cointext_button.setToolTip(_('Process CoinText, transforming it into a BIP70 payment request.'))
self.clear_button = EnterButton(_("&Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
buttons.addWidget(self.cointext_button)
grid.addLayout(buttons, 7, 1, 1, 3)
self.payto_e.textChanged.connect(self.update_buttons_on_seed) # hide/unhide cointext button, etc
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textChanged.connect(self.update_fee)
self.message_opreturn_e.editingFinished.connect(self.update_fee)
self.opreturn_rawhex_cb.stateChanged.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enabled = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enabled)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
extra = run_hook("not_enough_funds_extra", self)
if isinstance(extra, str) and extra:
text += " ({})".format(extra)
elif self.fee_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.DEFAULT
elif self.amount_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.BLUE
else:
amt_color, fee_color = ColorScheme.BLUE, ColorScheme.BLUE
opret_color = ColorScheme.DEFAULT
if self.op_return_toolong:
opret_color = ColorScheme.RED
text = _("OP_RETURN message too large, needs to be no longer than 220 bytes") + (", " if text else "") + text
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.message_opreturn_e.setStyleSheet(opret_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textEdited.connect(entry_changed)
self.message_opreturn_e.editingFinished.connect(entry_changed)
self.opreturn_rawhex_cb.stateChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
self.invoice_list.chkVisible()
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def get_custom_fee_text(self, fee_rate = None):
if not self.config.has_custom_fee_rate():
return ""
else:
if fee_rate is None: fee_rate = self.config.custom_fee_rate() / 1000.0
return str(round(fee_rate*100)/100) + " sats/B"
@staticmethod
def output_for_opreturn_stringdata(op_return):
if not isinstance(op_return, str):
raise OPReturnError('OP_RETURN parameter needs to be of type str!')
op_return_code = "OP_RETURN "
op_return_encoded = op_return.encode('utf-8')
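        # The textual payload is limited to 220 bytes here; the raw-hex variant below
        # checks 223 bytes against the complete script (OP_RETURN opcode included).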
if len(op_return_encoded) > 220:
raise OPReturnTooLarge(_("OP_RETURN message too large, needs to be no longer than 220 bytes"))
op_return_payload = op_return_encoded.hex()
script = op_return_code + op_return_payload
amount = 0
return (TYPE_SCRIPT, ScriptOutput.from_string(script), amount)
@staticmethod
def output_for_opreturn_rawhex(op_return):
if not isinstance(op_return, str):
raise OPReturnError('OP_RETURN parameter needs to be of type str!')
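        # The literal text 'empty' requests an OP_RETURN output with no payload at all.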
if op_return == 'empty':
op_return = ''
try:
op_return_script = b'\x6a' + bytes.fromhex(op_return.strip())
except ValueError:
raise OPReturnError(_('OP_RETURN script expected to be hexadecimal bytes'))
if len(op_return_script) > 223:
raise OPReturnTooLarge(_("OP_RETURN script too large, needs to be no longer than 223 bytes"))
amount = 0
return (TYPE_SCRIPT, ScriptOutput.protocol_factory(op_return_script), amount)
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
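        # A manually-entered fee is kept ("frozen") only while the fee field was edited
        # by the user and still has text or keyboard focus; otherwise it is recomputed.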
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
fee_rate = None
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
try:
opreturn_message = self.message_opreturn_e.text() if self.config.get('enable_opreturn') else None
if opreturn_message:
if self.opreturn_rawhex_cb.isChecked():
outputs.append(self.output_for_opreturn_rawhex(opreturn_message))
else:
outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
self.not_enough_funds = False
self.op_return_toolong = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except OPReturnTooLarge:
self.op_return_toolong = True
return
except OPReturnError as e:
self.statusBar().showMessage(str(e))
return
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
self.fee_e.setAmount(fee)
if self.max_button.isChecked():
amount = tx.output_value()
self.amount_e.setAmount(amount)
if fee is not None:
fee_rate = fee / tx.estimated_size()
self.fee_slider_mogrifier(self.get_custom_fee_text(fee_rate))
def fee_slider_mogrifier(self, text = None):
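        # Show either the fee slider or the static custom-fee label, depending on
        # whether a custom fee rate is configured in preferences.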
fee_slider_hidden = self.config.has_custom_fee_rate()
self.fee_slider.setHidden(fee_slider_hidden)
self.fee_custom_lbl.setHidden(not fee_slider_hidden)
if text is not None: self.fee_custom_lbl.setText(text)
def from_list_delete(self, name):
item = self.from_list.currentItem()
if (item and item.data(0, Qt.UserRole) == name
and not item.data(0, Qt.UserRole+1) ):
i = self.from_list.indexOfTopLevelItem(item)
try:
self.pay_from.pop(i)
except IndexError:
# The list may contain items not in the pay_from if added by a
# plugin using the spendable_coin_filter hook
pass
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
if not item:
return
menu = QMenu()
name = item.data(0, Qt.UserRole)
action = menu.addAction(_("Remove"), lambda: self.from_list_delete(name))
if item.data(0, Qt.UserRole+1):
action.setText(_("Not Removable"))
action.setDisabled(True)
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self, *, spendable=None):
''' Optional kwarg spendable indicates *which* of the UTXOs in the
        self.pay_from list are actually spendable. If this arg is specified,
coins in the self.pay_from list that aren't also in the 'spendable' list
will be grayed out in the UI, to indicate that they will not be used.
Otherwise all coins will be non-gray (default).
(Added for CashShuffle 02/23/2019) '''
sel = self.from_list.currentItem() and self.from_list.currentItem().data(0, Qt.UserRole)
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def name(x):
return "{}:{}".format(x['prevout_hash'], x['prevout_n'])
def format(x):
h = x['prevout_hash']
return '{}...{}:{:d}\t{}'.format(h[0:10], h[-10:],
x['prevout_n'], x['address'])
def grayify(twi):
b = twi.foreground(0)
b.setColor(Qt.gray)
for i in range(twi.columnCount()):
twi.setForeground(i, b)
def new(item, is_unremovable=False):
ret = QTreeWidgetItem( [format(item), self.format_amount(item['value']) ])
ret.setData(0, Qt.UserRole, name(item))
ret.setData(0, Qt.UserRole+1, is_unremovable)
return ret
for item in self.pay_from:
twi = new(item)
if spendable is not None and item not in spendable:
grayify(twi)
self.from_list.addTopLevelItem(twi)
if name(item) == sel:
self.from_list.setCurrentItem(twi)
if spendable is not None: # spendable may be None if no plugin filtered coins.
for item in spendable:
# append items added by the plugin to the spendable list
# at the bottom. These coins are marked as "not removable"
# in the UI (the plugin basically insisted these coins must
# be spent with the other coins in the list for privacy).
if item not in self.pay_from:
twi = new(item, True)
self.from_list.addTopLevelItem(twi)
if name(item) == sel:
self.from_list.setCurrentItem(twi)
def get_contact_payto(self, contact : Contact) -> str:
assert isinstance(contact, Contact)
_type, label = contact.type, contact.name
emoji_str = ''
mod_type = _type
mine_str = ''
if _type.startswith('cashacct'): # picks up cashacct and the cashacct_W pseudo-contacts
if _type == 'cashacct_T':
# temporary "pending verification" registration pseudo-contact. Never offer it as a completion!
return None
mod_type = 'cashacct'
info = self.wallet.cashacct.get_verified(label)
if info:
emoji_str = f' {info.emoji}'
if _type == 'cashacct_W':
mine_str = ' [' + _('Mine') + '] '
else:
self.print_error(label, "not found")
# could not get verified contact, don't offer it as a completion
return None
elif _type == 'openalias':
return contact.address
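# Note: the trailing conditional below applies to the whole concatenation
# ('+' binds tighter than 'if/else'); other contact types yield None.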
return label + emoji_str + ' ' + mine_str + '<' + contact.address + '>' if mod_type in ('address', 'cashacct') else None
def update_completions(self):
l = []
for contact in self.contact_list.get_full_contacts(include_pseudo=True):
s = self.get_contact_payto(contact)
if s is not None: l.append(s)
l.sort(key=lambda x: x.lower()) # case-insensitive sort
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
on_pw_cancel = kwargs.pop('on_pw_cancel', None)
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
if callable(on_pw_cancel):
on_pw_cancel()
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def read_send_tab(self):
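''' Validate the send tab and return a tuple (outputs, fee, label, coins),
or None if validation fails (an error is shown to the user). '''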
isInvoice = False
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
isInvoice = True
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if self.payto_e.is_alias and not self.payto_e.validated:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
try:
# handle op_return if specified and enabled
opreturn_message = self.message_opreturn_e.text()
if opreturn_message:
if self.opreturn_rawhex_cb.isChecked():
outputs.append(self.output_for_opreturn_rawhex(opreturn_message))
else:
outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
except OPReturnTooLarge as e:
self.show_error(str(e))
return
except OPReturnError as e:
self.show_error(str(e))
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if amount is None:
self.show_error(_('Invalid Amount'))
return
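# The fee is 'frozen' (kept as the user typed it) only if the fee field is
# visible, was manually edited, and currently has text or focus.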
freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
fee = self.fee_e.get_amount() if freeze_fee else None
coins = self.get_coins(isInvoice)
return outputs, fee, label, coins
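# Caches the signal connection used to kill the CoinText popup on tab change,
# so it is only connected once per window instance.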
_cointext_popup_kill_tab_changed_connection = None
def do_cointext(self):
''' This is called by the cointext button 'clicked' signal and it
initiates the processing of the cointext URL. This should only be
called if self.payto_e.cointext is not None, otherwise it will do
nothing. '''
if self.payto_e.cointext and not self.payment_request:
if self.gui_object.warn_if_no_network(self):
return
phone = self.payto_e.cointext
sats = self.amount_e.get_amount()
if sats:
url = "https://pay.cointext.io/p/{}/{}".format(phone, sats)
def get_cointext_pr():
# Runs in thread
self.print_error("CoinText URL", url)
pr = paymentrequest.get_payment_request(url) # raises on error
return pr
def on_success(pr):
# Runs in main thread
if pr:
if pr.error:
self.print_error("CoinText ERROR", pr.error)
self.show_error(_("There was an error processing the CoinText. Please check the phone number and try again."))
return
self.print_error("CoinText RESULT", repr(pr))
self.prepare_for_payment_request()
def show_popup():
if not self.send_button.isVisible():
# likely a watching-only wallet, in which case
# showing the popup label for the send button
# leads to unspecified position for the button
return
show_it = partial(
ShowPopupLabel,
text=_("Please review payment before sending CoinText"),
target=self.send_button, timeout=15000.0,
name="CoinTextPopup",
pointer_position=PopupWidget.LeftSide,
activation_hides=True, track_target=True,
dark_mode = ColorScheme.dark_scheme
)
if not self._cointext_popup_kill_tab_changed_connection:
# this ensures that if user changes tabs, the popup dies
# ... it is only connected once per instance lifetime
self._cointext_popup_kill_tab_changed_connection = self.tabs.currentChanged.connect(lambda: KillPopupLabel("CoinTextPopup"))
QTimer.singleShot(0, show_it)
pr.request_ok_callback = show_popup
self.on_pr(pr)
def on_error(exc):
self.print_error("CoinText EXCEPTION", repr(exc))
self.on_error(exc)
WaitingDialog(self.top_level_window(),
_("Retrieving CoinText info, please wait ..."),
get_cointext_pr, on_success, on_error)
else:
self.show_error(_('CoinText: Please specify an amount'))
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
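''' Read the send tab, build the transaction, confirm it with the user (and
password, if any), then sign and either preview or broadcast it. '''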
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except ExcessiveFee:
self.show_message(_("Your fee is too high. Max is 500 sat/byte."))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
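# Floor the fee shown in the confirmation dialog at MIN_AMOUNT; this does not
# alter the transaction itself.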
if (fee < MIN_AMOUNT): fee = MIN_AMOUNT
#if fee < self.wallet.relayfee() * tx.estimated_size() / 1000 and tx.requires_fee(self.wallet):
#self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
#return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
# IN THE FUTURE IF WE WANT TO APPEND SOMETHING IN THE MSG ABOUT THE FEE, CODE IS COMMENTED OUT:
#if fee > confirm_rate * tx.estimated_size() / 1000:
# msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if (fee < (tx.estimated_size())):
msg.append(_('Warning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm."))
tx.ephemeral['warned_low_fee_already'] = True
if self.config.get('enable_opreturn') and self.message_opreturn_e.text():
msg.append(_("You are using an OP_RETURN message. This gets permanently written to the blockchain."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx, tx_desc)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
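# Sweeps (see sweep_key_dialog) sign with externally supplied keypairs;
# ordinary sends sign with the wallet and password.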
if self.tx_external_keypairs:
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
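''' Broadcast tx on a background thread. If a payment request is active, the
signed tx is submitted to the request's payment URL instead of being
broadcast directly to the network. '''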
def broadcast_thread():
# non-GUI thread
status = False
msg = "Failed"
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
if pr:
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_payment(str(tx), str(refund_address))
msg = ack_msg
if ack_status:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
status = True
else:
status, msg = self.network.broadcast_transaction(tx)
return status, msg
# Check fee and warn if it's below 1.0 sats/B (and not warned already)
fee = None
try: fee = tx.get_fee()
except: pass # no fee info available for tx
# Check fee >= size otherwise warn. FIXME: If someday network relay
# rules change to be other than 1.0 sats/B minimum, this code needs
# to be changed.
if (isinstance(fee, int) and tx.is_complete() and fee < len(str(tx))//2
and not tx.ephemeral.get('warned_low_fee_already')):
msg = _('Warning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm.") + "\n\n" + _("Proceed?")
if not self.question(msg, title = _("Low Fee")):
return
# /end fee check
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
if self.gui_object.warn_if_no_network(self):
# Don't allow a useless broadcast when in offline mode. Previous to this we were getting an exception on broadcast.
return
elif not self.network.is_connected():
# Don't allow a potentially very slow broadcast when obviously not connected.
parent.show_error(_("Not connected"))
return
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
buttons, copy_index, copy_link = [ _('Ok') ], None, ''
try: txid = tx.txid() # returns None if not is_complete, but may raise potentially as well
except: txid = None
if txid is not None:
if tx_desc is not None:
self.wallet.set_label(txid, tx_desc)
copy_link = web.BE_URL(self.config, 'tx', txid)
if copy_link:
# tx is complete and there is a copy_link
buttons.insert(0, _("Copy link"))
copy_index = 0
if parent.show_message(_('Payment sent.') + '\n' + msg,
buttons = buttons,
defaultButton = buttons[-1],
escapeButton = buttons[-1]) == copy_index:
# There WAS a 'Copy link' and they clicked it
self.copy_to_clipboard(copy_link, _("Block explorer link copied to clipboard"), self.top_level_window())
self.invoice_list.update()
self.do_clear()
else:
if msg.startswith("error: "):
msg = msg.split(" ", 1)[-1] # take the last part, sans the "error: " prefix
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
result = dialog.exec_()
dialog.setParent(None)
if not result:
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.cointext = None
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.max_button.setDisabled(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
# New! Payment requests have an optional (may not be there!) attribute
# 'request_ok_callback' which takes 0 args and is called on request ok
# This facility was needed to do the CoinTextPopup label properly.
cb = getattr(self.payment_request, 'request_ok_callback', None)
if callable(cb):
cb()
def payment_request_error(self):
request_error = self.payment_request and self.payment_request.error
self.payment_request = None
self.print_error("PaymentRequest error:", request_error)
self.show_error(_("There was an error processing the payment request"), rich_text=False)
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = web.parse_URI(URI, self.on_pr)
except Exception as e:
self.show_error(_('Invalid vitae URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
op_return = out.get('op_return')
op_return_raw = out.get('op_return_raw')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
if op_return:
self.message_opreturn_e.setText(op_return)
self.message_opreturn_e.setHidden(False)
self.opreturn_rawhex_cb.setHidden(False)
self.opreturn_rawhex_cb.setChecked(False)
self.opreturn_label.setHidden(False)
elif op_return_raw is not None:
# 'is not None' allows blank value.
# op_return_raw is secondary precedence to op_return
if not op_return_raw:
op_return_raw='empty'
self.message_opreturn_e.setText(op_return_raw)
self.message_opreturn_e.setHidden(False)
self.opreturn_rawhex_cb.setHidden(False)
self.opreturn_rawhex_cb.setChecked(True)
self.opreturn_label.setHidden(False)
elif not self.config.get('enable_opreturn'):
self.message_opreturn_e.setText('')
self.message_opreturn_e.setHidden(True)
self.opreturn_rawhex_cb.setHidden(True)
self.opreturn_label.setHidden(True)
def do_clear(self):
''' Clears the send tab, resetting its UI state to its initial state.'''
KillPopupLabel("CoinTextPopup") # just in case it was alive
self.max_button.setChecked(False)
self.not_enough_funds = False
self.op_return_toolong = False
self.payment_request = None
self.payto_e.cointext = None
self.payto_e.is_pr = False
self.payto_e.is_alias, self.payto_e.validated = False, False # clear flags to avoid bad things
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e, self.message_opreturn_e]:
e.setText('')
e.setFrozen(False)
self.payto_e.setHidden(False)
self.payto_label.setHidden(False)
self.max_button.setDisabled(False)
self.opreturn_rawhex_cb.setChecked(False)
self.opreturn_rawhex_cb.setDisabled(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.message_opreturn_e.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_rawhex_cb.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_label.setVisible(self.config.get('enable_opreturn', False))
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_coin_state(self, utxos, freeze):
self.wallet.set_frozen_coin_state(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
self.gui_object.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
self.gui_object.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
self.gui_object.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?"
.format(addr.to_ui_string()))):
self.wallet.delete_address(addr)
self.update_tabs()
self.update_status()
self.clear_receive_tab()
def get_coins(self, isInvoice = False):
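''' Return the coins to spend: the manual 'Pay From' selection if set,
otherwise the wallet's spendable coins. The spendable_coin_filter hook may
filter the result (used by CashShuffle). '''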
coins = []
if self.pay_from:
coins = self.pay_from.copy()
else:
coins = self.wallet.get_spendable_coins(None, self.config, isInvoice)
run_hook("spendable_coin_filter", self, coins) # may modify coins -- used by CashShuffle if in shuffle = ENABLED mode.
if self.pay_from:
# coins may have been filtered, so indicate this in the UI
self.redraw_from_list(spendable=coins)
return coins
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
run_hook('on_spend_coins', self, coins) # CashShuffle: will set the mode of send tab to coins[0]'s shuffled/unshuffled state
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.do_clear()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, contacts : List[Contact]):
paytos = []
for contact in contacts:
s = self.get_contact_payto(contact)
if s is not None: paytos.append(s)
self.payto_payees(paytos)
def payto_payees(self, payees : List[str]):
''' Like payto_contacts except it accepts a list of free-form strings
rather than requiring a list of Contacts objects '''
self.show_send_tab()
if len(payees) == 1:
self.payto_e.setText(payees[0])
self.amount_e.setFocus()
else:
text = "\n".join([payee + ", 0" for payee in payees])
self.payto_e.setText(text)
self.payto_e.setFocus()
def resolve_cashacct(self, name):
''' Throws up a WaitingDialog while it resolves a Vitae ID.
Goes out to network, verifies all tx's.
Returns: a tuple of: (Info, Minimally_Encoded_Formatted_AccountName)
Argument `name` should be a Vitae ID name string of the form:
name#number.123
name#number
name#number.; etc
If the result would be ambiguous, that is considered an error, so enough
of the account name#number.collision_hash needs to be specified to
unambiguously resolve the Vitae ID.
On failure throws up an error window and returns None.'''
return cashacctqt.resolve_cashacct(self, name)
def set_contact(self, label, address, typ='address', replace=None) -> Contact:
''' Returns a reference to the newly inserted Contact object.
replace is optional and if specified, replace an existing contact,
otherwise add a new one.
Note that duplicate contacts will not be added multiple times, but in
that case the returned value would still be a valid Contact.
Returns None on failure.'''
assert typ in ('address', 'cashacct')
contact = None
if typ == 'cashacct':
tup = self.resolve_cashacct(label) # this displays an error message for us
if not tup:
self.contact_list.update() # Displays original
return
info, label = tup
address = info.address.to_ui_string()
contact = Contact(name=label, address=address, type=typ)
elif not Address.is_valid(address):
# Bad 'address' code path
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return
else:
# Good 'address' code path...
contact = Contact(name=label, address=address, type=typ)
assert contact
if replace != contact:
if self.contacts.has(contact):
self.show_error(_(f"A contact named {contact.name} with the same address and type already exists."))
self.contact_list.update()
return replace or contact
self.contacts.add(contact, replace_old=replace, unique=True)
self.contact_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.update_completions()
# The contact has changed, update any addresses that are displayed with the old information.
run_hook('update_contact2', contact, replace)
return contact
def delete_contacts(self, contacts):
n = len(contacts)
qtext = ''
if n <= 3:
def fmt(contact):
if len(contact.address) > 20:
addy = contact.address[:10] + '…' + contact.address[-10:]
else:
addy = contact.address
return f"{contact.name} <{addy}>"
names = [fmt(contact) for contact in contacts]
contact_str = ", ".join(names)
qtext = _("Remove {list_of_contacts} from your contact list?").format(list_of_contacts = contact_str)
else:
# Note: we didn't use ngettext here for plural check because n > 1 in this branch
qtext = _("Remove {number_of_contacts} contacts from your contact list?").format(number_of_contacts=n)
if not self.question(qtext):
return
removed_entries = []
for contact in contacts:
if self.contacts.remove(contact):
removed_entries.append(contact)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.contact_list.update()
self.update_completions()
run_hook('delete_contacts2', removed_entries)
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self.top_level_window(), _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1].to_ui_string(), pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
weakD = Weak.ref(d)
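# Only a weak reference to the dialog is captured, so it can be garbage-collected;
# do_delete re-checks the reference before closing it.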
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.invoice_list.update()
d = weakD()
if d: d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
d.setParent(None) # So Python can GC
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console(wallet=self.wallet)
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'vitae':bitcoin})
set_json = Weak(self.console.set_json)
c = commands.Commands(self.config, self.wallet, self.network, lambda: set_json(True))
methods = {}
password_getter = Weak(self.password_dialog)
def mkfunc(f, method):
return lambda *args, **kwargs: f(method, *args, password_getter=password_getter,
**kwargs)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self._search_box_spacer = QWidget()
self._search_box_spacer.setFixedWidth(6) # 6 px spacer
self.search_box = QLineEdit()
self.search_box.setPlaceholderText(_("Search wallet, {key}+F to hide").format(key='Ctrl' if sys.platform != 'darwin' else '⌘'))
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box, 1)
self.update_available_button = StatusBarButton(QIcon(":icons/electron-cash-update.svg"), _("Update available, click for details"), lambda: self.gui_object.show_update_checker(self, skip_check=True))
self.update_available_button.setStatusTip(_("A ViLight update is available"))
sb.addPermanentWidget(self.update_available_button)
self.update_available_button.setVisible(bool(self.gui_object.new_version_available)) # if hidden now gets unhidden by on_update_available when a new version comes in
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
self.cashshuffle_status_button = StatusBarButton(
self.cashshuffle_icon(),
'', # ToolTip will be set in update_cashshuffle code
self.cashshuffle_icon_leftclick
)
self.cashshuffle_toggle_action = QAction("", self.cashshuffle_status_button) # action text will get set in update_cashshuffle_icon()
self.cashshuffle_toggle_action.triggered.connect(self.toggle_cashshuffle)
self.cashshuffle_settings_action = QAction("", self.cashshuffle_status_button)
self.cashshuffle_settings_action.triggered.connect(self.show_cashshuffle_settings)
self.cashshuffle_viewpools_action = QAction(_("View pools..."), self.cashshuffle_status_button)
self.cashshuffle_viewpools_action.triggered.connect(self.show_cashshuffle_pools)
self.cashshuffle_status_button.addAction(self.cashshuffle_viewpools_action)
self.cashshuffle_status_button.addAction(self.cashshuffle_settings_action)
self.cashshuffle_separator_action = sep = QAction(self.cashshuffle_status_button)
sep.setSeparator(True)
self.cashshuffle_status_button.addAction(sep)
self.cashshuffle_status_button.addAction(self.cashshuffle_toggle_action)
self.cashshuffle_status_button.setContextMenuPolicy(Qt.ActionsContextMenu)
sb.addPermanentWidget(self.cashshuffle_status_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.svg"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
weakSelf = Weak.ref(self)
gui_object = self.gui_object
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.svg"), _("Network"), lambda: gui_object.show_network_dialog(weakSelf()))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def on_update_available(self, b):
self.update_available_button.setVisible(bool(b))
# The popup label won't really be shown unless this window is
# on top.. but regardless we give each label a unique internal name
# so they don't interfere with each other.
lblName = "UpdateAvailable_" + self.diagnostic_name()
if b:
ShowPopupLabel(name = lblName,
text="<center><b>{}</b><br><small>{}</small></center>".format(_("Update Available"),_("Click for details")),
target=self.update_available_button,
timeout=20000, onClick=self.update_available_button.click,
onRightClick=self.update_available_button.click,
dark_mode = ColorScheme.dark_scheme)
else:
# Immediately kills any extant labels
KillPopupLabel(lblName)
def update_lock_icon(self):
icon = QIcon(":icons/lock.svg") if self.wallet.has_password() else QIcon(":icons/unlock.svg")
tip = _('Wallet Password') + ' - '
tip += _('Enabled') if self.wallet.has_password() else _('Disabled')
self.password_button.setIcon(icon)
self.password_button.setStatusTip(tip)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only() and not self.payto_e.cointext)
self.preview_button.setVisible(not self.payto_e.cointext)
self.cointext_button.setVisible(bool(self.payto_e.cointext))
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self.top_level_window(), self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
run_hook("on_new_password", self, password, new_password)
except InvalidPassword as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.balance_label.setHidden(True)
self.statusBar().insertWidget(0, self._search_box_spacer)
self._search_box_spacer.show()
self.search_box.setFocus(1)
if self.search_box.text():
self.do_search(self.search_box.text())
else:
self._search_box_spacer.hide()
self.statusBar().removeWidget(self._search_box_spacer)
self.balance_label.setHidden(False)
self.do_search('')
def do_search(self, t):
'''Apply search text to all tabs. FIXME: if a plugin later is loaded
it will not receive the search filter -- but most plugins I know about
do not support searchable_list anyway, so hopefully it's a non-issue.'''
for i in range(self.tabs.count()):
tab = self.tabs.widget(i)
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self.top_level_window(), _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(350)
line2 = QLineEdit()
line2.setFixedWidth(350)
grid.addWidget(QLabel(_("Name")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Address")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
name = line1.text().strip()
address = line2.text().strip()
prefix = networks.net.CASHADDR_PREFIX.lower() + ':'
if address.lower().startswith(prefix):
address = address[len(prefix):]
self.set_contact(name, address)
def lookup_cash_account_dialog(self):
blurb = "<br><br>" + _('Enter a string of the form <b>name#<i>number</i></b>')
cashacctqt.lookup_cash_account_dialog(self, self.wallet, blurb=blurb,
add_to_contacts_button = True, pay_to_button = True)
def show_master_public_keys(self):
dialog = WindowModalDialog(self.top_level_window(), _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton()
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path) # implicitly also calls stop_wallet
self.update_recently_visited(wallet_path) # this ensures it's deleted from the menu
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
self.close()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self.top_level_window(), seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
d.setParent(None) # Help Python GC this sooner rather than later
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self.top_level_window(), _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel('{}: {}'.format(_("Address"), address)))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton()
vbox.addWidget(keys_e)
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=address.to_script().hex())
rds_e.addCopyButton()
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in ViLight, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
try:
addr = Address.from_string(address)
except:
self.show_message(_('Invalid Vitae address.'))
return
if addr.kind != addr.ADDR_P2PKH:
self.show_message(_('Cannot sign messages with this type of address.') + '\n\n' + self.msg_sign)
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(addr):
self.show_message(_('Address not in wallet.'))
return
task = partial(self.wallet.sign_message, addr, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
try:
address = Address.from_string(address.text().strip())
except:
self.show_message(_('Invalid Vitae address.'))
return
message = message.toPlainText().strip().encode('utf-8')
try:
# This can throw on invalid base64
sig = base64.b64decode(signature.toPlainText())
verified = bitcoin.verify_message(address, sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address.to_ui_string() if address else '')
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
if not isinstance(pubkey, str):
pubkey = pubkey.to_ui_string()
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
return PasswordDialog(parent, msg).run()
def tx_from_text(self, txt):
from electroncash.transaction import tx_from_str
try:
txt_tx = tx_from_str(txt)
tx = Transaction(txt_tx, sign_schnorr=self.wallet.is_schnorr_enabled())
tx.deserialize()
if self.wallet:
my_coins = self.wallet.get_spendable_coins(None, self.config)
my_outpoints = [vin['prevout_hash'] + ':' + str(vin['prevout_n']) for vin in my_coins]
for i, txin in enumerate(tx.inputs()):
outpoint = txin['prevout_hash'] + ':' + str(txin['prevout_n'])
if outpoint in my_outpoints:
my_index = my_outpoints.index(outpoint)
tx._inputs[i]['value'] = my_coins[my_index]['value']
return tx
except:
traceback.print_exc(file=sys.stdout)
self.show_critical(_("ViLight was unable to parse your transaction"))
return
# Due to the asynchronous nature of the qr reader we need to keep the
# dialog instance as member variable to prevent reentrancy/multiple ones
# from being presented at once.
_qr_dialog = None
def read_tx_from_qrcode(self):
if self._qr_dialog:
# Re-entrancy prevention -- there is some lag between when the user
# taps the QR button and the modal dialog appears. We want to
# prevent multiple instances of the dialog from appearing, so we
# must do this.
self.print_error("Warning: QR dialog is already presented, ignoring.")
return
if self.gui_object.warn_if_cant_import_qrreader(self):
return
from electroncash import get_config
from .qrreader import QrReaderCameraDialog
data = ''
self._qr_dialog = None
try:
self._qr_dialog = QrReaderCameraDialog(parent=self.top_level_window())
def _on_qr_reader_finished(success: bool, error: str, result):
if self._qr_dialog:
self._qr_dialog.deleteLater(); self._qr_dialog = None
if not success:
if error:
self.show_error(error)
return
if not result:
return
# if the user scanned a vitae URI
if result.lower().startswith(networks.net.CASHADDR_PREFIX + ':'):
self.pay_to_URI(result)
return
# else if the user scanned an offline signed tx
try:
result = bh2u(bitcoin.base_decode(result, length=None, base=43))
tx = self.tx_from_text(result) # will show an error dialog on error
if not tx:
return
except BaseException as e:
self.show_error(str(e))
return
self.show_transaction(tx)
self._qr_dialog.qr_finished.connect(_on_qr_reader_finished)
self._qr_dialog.start_scan(get_config().get_video_device())
except BaseException as e:
if util.is_verbose:
import traceback
traceback.print_exc()
self._qr_dialog = None
self.show_error(str(e))
def read_tx_from_file(self, *, fileName = None):
fileName = fileName or self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r", encoding='utf-8') as f:
file_content = f.read()
file_content = file_content.strip()
tx_file_dict = json.loads(str(file_content))
except (ValueError, IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("ViLight was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
tx = self.tx_from_text(file_content)
return tx
def do_process_from_text(self):
from electroncash.transaction import SerializationError
text = text_dialog(self.top_level_window(), _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("ViLight was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self, *, fileName = None):
from electroncash.transaction import SerializationError
try:
tx = self.read_tx_from_file(fileName=fileName)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("ViLight was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self, *, txid=None, parent=None, tx_desc=None):
parent = parent or self
if self.gui_object.warn_if_no_network(parent):
return
from electroncash import transaction
ok = txid is not None
if not ok:
txid, ok = QInputDialog.getText(parent, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
ok, r = self.network.get_raw_tx_for_txid(txid, timeout=10.0)
if not ok:
parent.show_message(_("Error retrieving transaction") + ":\n" + r)
return
tx = transaction.Transaction(r, sign_schnorr=self.wallet.is_schnorr_enabled()) # note that presumably the tx is already signed if it comes from blockchain so this sign_schnorr parameter is superfluous, but here to satisfy my OCD -Calin
self.show_transaction(tx, tx_desc=tx_desc)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self.top_level_window(), _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electron-cash-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
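# 'done' and 'cancelled' coordinate the background privkeys_thread with the dialog:
# closing the dialog early sets 'cancelled' so the thread stops and emits no further signals.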
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
try:
privkey = self.wallet.export_private_key(addr, password)
except InvalidPassword:
# See #921 -- possibly a corrupted wallet or other strangeness
privkey = 'INVALID_PASSWORD'
private_keys[addr.to_ui_string()] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join('{}\t{}'.format(addr, privkey)
for addr, privkey in private_keys.items())
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText(_("Please wait... {num}/{total}").format(num=len(private_keys),total=len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
res = d.exec_()
d.setParent(None) # for python GC
if not res:
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("ViLight was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+", encoding='utf-8') as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
data = f.read()
data = json.loads(data)
if type(data) is not dict or not len(data) or not all(type(v) is str and type(k) is str for k,v in data.items()):
self.show_critical(_("The file you selected does not appear to contain labels."))
return
for key, value in data.items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("ViLight was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
self.utxo_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electron-cash_labels.json', "*.json")
if fileName:
with open(fileName, 'w+', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("ViLight was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self.top_level_window(), _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electron-cash-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
res = d.exec_()
d.setParent(None) # for python GC
if not res:
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("ViLight was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.export_history(fx=self.fx)
ccy = (self.fx and self.fx.get_currency()) or ''
has_fiat_columns = history and self.fx and self.fx.show_history() and 'fiat_value' in history[0] and 'fiat_balance' in history[0]
lines = []
for item in history:
if is_csv:
cols = [item['txid'], item.get('label', ''), item['confirmations'], item['value'], item['date']]
if has_fiat_columns:
cols += [item['fiat_value'], item['fiat_balance']]
lines.append(cols)
else:
if has_fiat_columns and ccy:
item['fiat_currency'] = ccy # add the currency to each entry in the json. this wastes space but json is bloated anyway so this won't hurt too much, we hope
elif not has_fiat_columns:
# No need to include these fields as they will always be 'No Data'
item.pop('fiat_value', None)
item.pop('fiat_balance', None)
lines.append(item)
with open(fileName, "w+", encoding="utf-8") as f: # ensure encoding to utf-8. Avoid Windows cp1252. See #1453.
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
cols = ["transaction_hash","label", "confirmations", "value", "timestamp"]
if has_fiat_columns:
cols += [f"fiat_value_{ccy}", f"fiat_balance_{ccy}"] # in CSV mode, we use column names eg fiat_value_USD, etc
transaction.writerow(cols)
for line in lines:
transaction.writerow(line)
else:
f.write(json.dumps(lines, indent=4))
def sweep_key_dialog(self):
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
if not addresses:
self.show_warning(_('Wallet has no address to sweep to'))
return
d = WindowModalDialog(self.top_level_window(), title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
bip38_warn_label = QLabel(_("<b>BIP38 support is disabled because a requisite library is not installed.</b> Please install 'cryptodomex' or omit BIP38 private keys (private keys starting in 6P...). Decrypt keys to WIF format (starting with 5, K, or L) in order to sweep."))
bip38_warn_label.setWordWrap(True)
bip38_warn_label.setHidden(True)
vbox.addWidget(bip38_warn_label)
extra = ""
if bitcoin.is_bip38_available():
extra += " " + _('or BIP38 keys')
vbox.addWidget(QLabel(_("Enter private keys") + extra + " :"))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
h, addr_combo = address_combo(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
sweep_button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), sweep_button))
def get_address_text():
return addr_combo.currentText()
def get_priv_keys():
return keystore.get_private_keys(keys_e.toPlainText(), allow_bip38=True)
def has_bip38_keys_but_no_bip38():
if bitcoin.is_bip38_available():
return False
keys = [k for k in keys_e.toPlainText().split() if k]
return any(bitcoin.is_bip38_key(k) for k in keys)
def enable_sweep():
bad_bip38 = has_bip38_keys_but_no_bip38()
sweepok = bool(get_address_text() and not bad_bip38 and get_priv_keys())
sweep_button.setEnabled(sweepok)
bip38_warn_label.setHidden(not bad_bip38)
keys_e.textChanged.connect(enable_sweep)
enable_sweep()
res = d.exec_()
d.setParent(None)
if not res:
return
try:
self.do_clear()
keys = get_priv_keys()
bip38s = {}
for i, k in enumerate(keys):
if bitcoin.is_bip38_key(k):
bip38s[k] = i
if bip38s:
# For all the BIP38s detected, prompt for password
from .bip38_importer import Bip38Importer
d2 = Bip38Importer(bip38s.keys(), parent=self.top_level_window())
d2.exec_()
d2.setParent(None)
if d2.decoded_keys:
for k,tup in d2.decoded_keys.items():
wif, adr = tup
# rewrite the keys they specified with the decrypted WIF in the keys list for sweep_preparations to work below...
i = bip38s[k]
keys[i] = wif
else:
self.show_message(_("User cancelled"))
return
coins, keypairs = sweep_preparations(keys, self.network)
self.tx_external_keypairs = keypairs
self.payto_e.setText(get_address_text())
self.spend_coins(coins)
self.spend_max()
except BaseException as e:
self.show_message(str(e))
return
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self.top_level_window(), title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad, bad_info = [], []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
bad_info.append("{}: {}".format(key, str(e)))
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_warning(_("The following could not be imported") + ':\n' + '\n'.join(bad), detail_text='\n\n'.join(bad_info))
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
def import_addr(addr):
if self.wallet.import_address(Address.from_string(addr)):
return addr
return ''
self._do_import(title, msg, import_addr)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
if bitcoin.is_bip38_available():
msg += " " + _('or BIP38 keys')
def func(key):
if bitcoin.is_bip38_available() and bitcoin.is_bip38_key(key):
from .bip38_importer import Bip38Importer
d = Bip38Importer([key], parent=self.top_level_window(),
message = _('A BIP38 key was specified; please enter a password to decrypt it'),
show_count = False)
d.exec_()
d.setParent(None) # python GC quicker if this happens
if d.decoded_keys:
wif, adr = d.decoded_keys[key]
return self.wallet.import_private_key(wif, password)
else:
raise util.UserCancelled()
else:
return self.wallet.import_private_key(key, password)
self._do_import(title, msg, func)
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def toggle_cashaddr_status_bar(self):
self.gui_object.toggle_cashaddr()
self.statusBar().showMessage(self.cashaddr_status_tip(), 2000)
def toggle_cashaddr_settings(self, state):
self.gui_object.toggle_cashaddr(state == Qt.Checked)
def toggle_cashaddr(self, on):
self.print_error('*** WARNING ElectrumWindow.toggle_cashaddr: This function is deprecated. Please do not call it!')
self.gui_object.toggle_cashaddr(on)
def cashshuffle_plugin_if_loaded(self):
return self.gui_object.plugins.get_internal_plugin("shuffle", force_load = False)
def is_cashshuffle_enabled(self):
plugin = self.cashshuffle_plugin_if_loaded()
return bool(plugin and plugin.is_enabled() and plugin.window_has_cashshuffle(self))
def cashshuffle_icon(self):
if self.is_cashshuffle_enabled():
if self._cash_shuffle_flag == 1:
return QIcon(":icons/cashshuffle_on_error.svg")
else:
return QIcon(":icons/cashshuffle_on.svg")
else:
self._cash_shuffle_flag = 0
return QIcon(":icons/cashshuffle_off.svg")
def update_cashshuffle_icon(self):
self.cashshuffle_status_button.setIcon(self.cashshuffle_icon())
loaded = bool(self.cashshuffle_plugin_if_loaded())
en = self.is_cashshuffle_enabled()
if self._cash_shuffle_flag == 0:
self.cashshuffle_status_button.setStatusTip((_("CashShuffle") + " - " + _("ENABLED")) if en else (_("CashShuffle") + " - " + _("Disabled")))
rcfcm = _("Right-click for context menu")
self.cashshuffle_status_button.setToolTip(
(_("Toggle CashShuffle") + "\n" + rcfcm)
#(_("Left-click to view pools") + "\n" + rcfcm) if en
#else (_("Toggle CashShuffle") + "\n" + rcfcm)
)
self.cashshuffle_toggle_action.setText(_("Enable CashShuffle") if not en else _("Disable CashShuffle"))
self.cashshuffle_settings_action.setText(_("CashShuffle Settings..."))
self.cashshuffle_viewpools_action.setEnabled(True)
elif self._cash_shuffle_flag == 1: # Network server error
self.cashshuffle_status_button.setStatusTip(_('CashShuffle Error: Could not connect to server'))
self.cashshuffle_status_button.setToolTip(_('Right-click to select a different CashShuffle server'))
self.cashshuffle_settings_action.setText(_("Resolve Server Problem..."))
self.cashshuffle_viewpools_action.setEnabled(False)
self.cashshuffle_settings_action.setVisible(en or loaded)
self.cashshuffle_viewpools_action.setVisible(en)
if en:
# ensure 'Disable CashShuffle' appears at the end of the context menu
self.cashshuffle_status_button.removeAction(self.cashshuffle_separator_action)
self.cashshuffle_status_button.removeAction(self.cashshuffle_toggle_action)
self.cashshuffle_status_button.addAction(self.cashshuffle_separator_action)
self.cashshuffle_status_button.addAction(self.cashshuffle_toggle_action)
else:
# ensure 'Enable CashShuffle' appears at the beginning of the context menu
self.cashshuffle_status_button.removeAction(self.cashshuffle_separator_action)
self.cashshuffle_status_button.removeAction(self.cashshuffle_toggle_action)
actions = self.cashshuffle_status_button.actions()
self.cashshuffle_status_button.insertAction(actions[0] if actions else None, self.cashshuffle_separator_action)
self.cashshuffle_status_button.insertAction(self.cashshuffle_separator_action, self.cashshuffle_toggle_action)
def show_cashshuffle_settings(self):
p = self.cashshuffle_plugin_if_loaded()
if p:
msg = None
if self._cash_shuffle_flag == 1:
# had error
msg = _("There was a problem connecting to this server.\nPlease choose a different CashShuffle server.")
p.settings_dialog(self, msg)
#else: # commented-out. Enable this if you want to use the non-modal network settings as the destination for this action
# # no error -- use the free-floating non-modal network dialog
# if not p.show_cashshuffle_tab_in_network_dialog(self):
# # Huh. Network dialog creation/show failed. Fall back to modal window
# p.settings_dialog(self, msg)
def show_cashshuffle_pools(self):
p = self.cashshuffle_plugin_if_loaded()
if p:
p.view_pools(self)
def cashshuffle_icon_leftclick(self):
self.toggle_cashshuffle()
return
# delete the above 2 lines if we want the left-click to revert to
# Josh's suggestion (leaving the code in here for now)
if self.is_cashshuffle_enabled():
if self._cash_shuffle_flag != 0:
# Jump to settings.
self.cashshuffle_settings_action.trigger()
return
if self.cashshuffle_viewpools_action.isVisible():
# New! We just let this icon be the "View pools..." action when
# the plugin is already loaded and enabled. This hopefully will
# discourage disabling. Also it's been found that "View pools..."
# is the most popular action anyway -- might as well make it
# convenient to access with 1-click. (@zquestz suggested this)
self.cashshuffle_viewpools_action.trigger()
return
#else... in all other cases just toggle cashshuffle
self.toggle_cashshuffle()
def toggle_cashshuffle(self):
if not self.is_wallet_cashshuffle_compatible():
self.show_warning(_("This wallet type cannot be used with CashShuffle."), parent=self)
return
plugins = self.gui_object.plugins
p0 = self.cashshuffle_plugin_if_loaded()
p = p0 or plugins.enable_internal_plugin("shuffle")
if not p:
raise RuntimeError("Could not find CashShuffle plugin")
was_enabled = p.window_has_cashshuffle(self)
if was_enabled and not p.warn_if_shuffle_disable_not_ok(self):
# user at nag screen said "no", so abort
self.update_cashshuffle_icon()
return
enable_flag = not was_enabled
self._cash_shuffle_flag = 0
KillPopupLabel("CashShuffleError")
if not p0:
# plugin was not loaded -- so flag window as wanting cashshuffle and do init
p.window_set_wants_cashshuffle(self, enable_flag)
p.init_qt(self.gui_object)
else:
# plugin was already started -- just add the window to the plugin
p.window_set_cashshuffle(self, enable_flag)
self.update_cashshuffle_icon()
self.statusBar().showMessage(self.cashshuffle_status_button.statusTip(), 3000)
if enable_flag and self.config.get("show_utxo_tab") is None:
self.toggle_tab(self.utxo_tab) # toggle utxo tab to 'on' if user never specified it should be off.
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self.top_level_window(), _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
global_tx_widgets, per_wallet_tx_widgets = [], []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electroncash.i18n import languages, get_system_language_match, match_language
language_names = []
language_keys = []
for (lang_code, lang_def) in languages.items():
language_keys.append(lang_code)
lang_name = []
lang_name.append(lang_def.name)
if lang_code == '':
# System entry in languages list (==''), gets system setting
sys_lang = get_system_language_match()
if sys_lang:
lang_name.append(f' [{languages[sys_lang].name}]')
language_names.append(''.join(lang_name))
lang_combo.addItems(language_names)
conf_lang = self.config.get("language", '')
if conf_lang:
# The below code allows us to rename languages in saved config and
# have them still line up with languages in our languages dict.
# For example we used to save English as en_UK but now it's en_US
# and it will still match
conf_lang = match_language(conf_lang)
try: index = language_keys.index(conf_lang)
except ValueError: index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]:
w.setEnabled(False)
def on_lang(x):
lang_request = language_keys[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.update_tabs()
self.update_status()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_customfee(x):
amt = customfee_e.get_amount()
m = int(amt * 1000.0) if amt is not None else None
self.config.set_key('customfee', m)
self.fee_slider.update()
self.fee_slider_mogrifier()
customfee_e = BTCSatsByteEdit()
customfee_e.setAmount(self.config.custom_fee_rate() / 1000.0 if self.config.has_custom_fee_rate() else None)
customfee_e.textChanged.connect(on_customfee)
customfee_label = HelpLabel(_('Custom Fee Rate'), _('Custom Fee Rate in Satoshis per byte'))
fee_widgets.append((customfee_label, customfee_e))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link/']) + '\n\n'\
+ _('For more information, see http://openalias.org')
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = util.base_unit_labels # ( 'VITAE', 'mVITAE', 'bits' )
msg = _('Base unit of your wallet.')\
+ '\n1 VITAE = 1,000 mVITAE = 1,000,000 bits.\n' \
+ _('These settings affect the fields in the Send tab') + ' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
dp = util.base_units.get(unit_result)
if dp is not None:
self.decimal_point = dp
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_tabs()
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
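# (Context note, not from the source: per the "1 VITAE = 1,000 mVITAE = 1,000,000 bits"
# text above, the corresponding decimal_point values would be 8, 5 and 2; the exact
# mapping lives in util.base_units and is assumed here rather than shown.)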
block_explorers = web.BE_sorted_list()
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(web.BE_from_config(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
qr_combo = QComboBox()
qr_combo.addItem(_("Default"),"default")
system_cameras = []
try:
from PyQt5.QtMultimedia import QCameraInfo
system_cameras = QCameraInfo.availableCameras()
qr_label = HelpLabel(_('Video Device') + ':', _("For scanning QR codes."))
except ImportError as e:
# Older Qt or missing libs -- disable GUI control and inform user why
qr_combo.setEnabled(False)
qr_combo.setToolTip(_("Unable to probe for cameras on this system. QtMultimedia is likely missing."))
qr_label = HelpLabel(_('Video Device') + ' ' + _('(disabled)') + ':', qr_combo.toolTip() + "\n\n" + str(e))
qr_label.setToolTip(qr_combo.toolTip())
for cam in system_cameras:
qr_combo.addItem(cam.description(), cam.deviceName())
video_device = self.config.get("video_device")
video_device_index = 0
if video_device:
video_device_index = qr_combo.findData(video_device)
qr_combo.setCurrentIndex(video_device_index)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
theme_name = self.config.get('qt_gui_color_theme', 'default')
dark_theme_available = self.gui_object.is_dark_theme_available()
if theme_name == 'dark' and not dark_theme_available:
theme_name = 'default'
index = colortheme_combo.findData(theme_name)
if index < 0: index = 0
colortheme_combo.setCurrentIndex(index)
msg = ( _("Dark theme support requires the package 'QDarkStyle' (typically installed via the 'pip3' command on Unix & macOS).")
if not dark_theme_available
else '' )
lbltxt = _('Color theme') + ':'
colortheme_label = HelpLabel(lbltxt, msg) if msg else QLabel(lbltxt)
def on_colortheme(x):
item_data = colortheme_combo.itemData(x)
if not dark_theme_available and item_data == 'dark':
self.show_error(_("Dark theme is not available. Please install QDarkStyle to access this feature."))
colortheme_combo.setCurrentIndex(0)
return
self.config.set_key('qt_gui_color_theme', item_data, True)
if theme_name != item_data:
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
if sys.platform not in ('darwin',):
# Enable/Disable HighDPI -- this option makes no sense for macOS
# and thus does not appear on that platform
hidpi_chk = QCheckBox(_('Automatic high DPI scaling'))
if sys.platform in ('linux',):
hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as overly large status bar icons)"))
else: # windows
hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as dialog box text being cut off"))
hidpi_chk.setChecked(bool(self.config.get('qt_enable_highdpi', True)))
if self.config.get('qt_disable_highdpi'):
hidpi_chk.setToolTip(_('Automatic high DPI scaling was disabled from the command-line'))
hidpi_chk.setChecked(False)
hidpi_chk.setDisabled(True)
def on_hi_dpi_toggle():
self.config.set_key('qt_enable_highdpi', hidpi_chk.isChecked())
self.need_restart = True
hidpi_chk.stateChanged.connect(on_hi_dpi_toggle)
gui_widgets.append((hidpi_chk, None))
gui_widgets.append((None, None)) # spacer
updatecheck_cb = QCheckBox(_("Automatically check for updates"))
updatecheck_cb.setChecked(self.gui_object.has_auto_update_check())
updatecheck_cb.setToolTip(_("Enable this option if you wish to be notified as soon as a new version of ViLight becomes available"))
def on_set_updatecheck(v):
self.gui_object.set_auto_update_check(v == Qt.Checked)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
notify_tx_cb = QCheckBox(_('Notify when receiving funds'))
notify_tx_cb.setToolTip(_('If enabled, a system notification will be presented when you receive funds to this wallet.'))
notify_tx_cb.setChecked(bool(self.wallet.storage.get('gui_notify_tx', True)))
def on_notify_tx(b):
self.wallet.storage.put('gui_notify_tx', bool(b))
notify_tx_cb.stateChanged.connect(on_notify_tx)
per_wallet_tx_widgets.append((notify_tx_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
if self.force_use_single_change_addr:
usechange_cb.setChecked(True)
usechange_cb.setEnabled(False)
if isinstance(self.force_use_single_change_addr, str):
usechange_cb.setToolTip(self.force_use_single_change_addr)
else:
usechange_cb.setChecked(self.wallet.use_change)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
per_wallet_tx_widgets.append((usechange_cb, None))
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
if self.force_use_single_change_addr:
multiple_cb.setEnabled(False)
multiple_cb.setChecked(False)
if isinstance(self.force_use_single_change_addr, str):
multiple_cb.setToolTip(self.force_use_single_change_addr)
else:
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_cb.stateChanged.connect(on_multiple)
per_wallet_tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
global_tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
enable_opreturn = bool(self.config.get('enable_opreturn'))
opret_cb = QCheckBox(_('Enable OP_RETURN output'))
opret_cb.setToolTip(_('Enable posting messages with OP_RETURN.'))
opret_cb.setChecked(enable_opreturn)
opret_cb.stateChanged.connect(self.on_toggled_opreturn)
global_tx_widgets.append((opret_cb,None))
# Schnorr
use_schnorr_cb = QCheckBox(_("Enable Schnorr signatures"))
use_schnorr_cb.setChecked(self.wallet.is_schnorr_enabled())
use_schnorr_cb.stateChanged.connect(self.wallet.set_schnorr_enabled)
no_schnorr_reason = []
if self.wallet.is_schnorr_possible(no_schnorr_reason):
use_schnorr_cb.setEnabled(True)
use_schnorr_cb.setToolTip(_("Sign all transactions using Schnorr signatures."))
else:
# not possible (wallet type not supported); show reason in tooltip
use_schnorr_cb.setEnabled(False)
use_schnorr_cb.setToolTip(no_schnorr_reason[0])
per_wallet_tx_widgets.append((use_schnorr_cb, None))
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
c = self.fx.get_currency()
h = self.fx.get_history_config()
else:
c, h = self.fx.default_currency, False
exchanges = self.fx.get_exchanges_by_ccy(c, h)
conf_exchange = self.fx.config_exchange()
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
idx = ex_combo.findText(conf_exchange) # try and restore previous exchange if in new list
if idx < 0:
# hmm, previous exchange wasn't in new h= setting. Try default exchange.
idx = ex_combo.findText(self.fx.default_exchange)
idx = 0 if idx < 0 else idx # if still no success (idx < 0) -> default to the first exchange in combo
if exchanges: # don't set index if no exchanges, as any index is illegal. this shouldn't happen.
ex_combo.setCurrentIndex(idx) # note this will emit a currentIndexChanged signal if it's changed
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
changed = bool(self.fx.get_history_config()) != bool(checked)
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
if changed:
self.history_list.update() # this won't happen too often as it's rate-limited
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(gui_widgets, _('General')),
(fee_widgets, _('Fees')),
(OrderedDict([
( _("App-Global Options") , global_tx_widgets ),
( _("Per-Wallet Options") , per_wallet_tx_widgets),
]), _('Transactions')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
def add_tabs_info_to_tabs(tabs, tabs_info):
def add_widget_pair(a,b,grid):
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
if a:
grid.addWidget(a, i, 0, 1, 2)
else:
grid.addItem(QSpacerItem(15, 15), i, 0, 1, 2)
for thing, name in tabs_info:
tab = QWidget()
if isinstance(thing, dict):
# This Prefs tab is laid out as groupboxes one atop another...
d = thing
vbox = QVBoxLayout(tab)
for groupName, widgets in d.items():
gbox = QGroupBox(groupName)
grid = QGridLayout(gbox)
grid.setColumnStretch(0,1)
for a,b in widgets:
add_widget_pair(a,b,grid)
vbox.addWidget(gbox, len(widgets))
else:
# Standard layout.. 1 tab has just a grid of widgets
widgets = thing
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
add_widget_pair(a,b,grid)
tabs.addTab(tab, name)
# / add_tabs_info_to_tabs
add_tabs_info_to_tabs(tabs, tabs_info)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
d.setParent(None) # for Python GC
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_message(_('Please restart ViLight to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def is_alive(self): return bool(not self.cleaned_up)
def clean_up_connections(self):
def disconnect_signals():
for attr_name in dir(self):
if attr_name.endswith("_signal") and attr_name != "cashaddr_toggled_signal":
sig = getattr(self, attr_name)
if isinstance(sig, pyqtBoundSignal):
try: sig.disconnect()
except TypeError: pass # no connections
elif attr_name.endswith("__RateLimiter"): # <--- NB: this needs to match the attribute name in util.py rate_limited decorator
rl_obj = getattr(self, attr_name)
if isinstance(rl_obj, RateLimiter):
rl_obj.kill_timer()
try: self.disconnect()
except TypeError: pass
def disconnect_network_callbacks():
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
# /
disconnect_network_callbacks()
disconnect_signals()
def clean_up_children(self):
# Status bar holds references to self, so clear it to help GC this window
self.setStatusBar(None)
# Note that due to quirks on macOS and the shared menu bar, we do *NOT*
# clear the menuBar. Instead, doing this causes the object to get
# deleted and/or its actions (and more importantly menu action hotkeys)
# to go away immediately.
self.setMenuBar(None)
# Disable shortcuts immediately to prevent them from accidentally firing
# on us after we are closed. They will get deleted when this QObject
# is finally deleted by Qt.
for shortcut in self._shortcuts:
shortcut.setEnabled(False)
del shortcut
self._shortcuts.clear()
# Reparent children to 'None' so python GC can clean them up sooner rather than later.
# This also hopefully helps accelerate this window's GC.
children = [c for c in self.children()
if (isinstance(c, (QWidget, QAction, TaskThread))
and not isinstance(c, (QStatusBar, QMenuBar, QFocusFrame, QShortcut)))]
for c in children:
try: c.disconnect()
except TypeError: pass
c.setParent(None)
def clean_up(self):
self.wallet.thread.stop()
self.wallet.thread.wait() # Join the thread to make sure it's really dead.
for w in [self.address_list, self.history_list, self.utxo_list, self.contact_list]:
if w: w.clean_up() # tell relevant widget to clean itself up, unregister callbacks, etc
# We catch these errors with the understanding that there is no recovery at
# this point, given user has likely performed an action we cannot recover
# cleanly from. So we attempt to exit as cleanly as possible.
try:
self.config.set_key("is_maximized", self.isMaximized())
self.config.set_key("console-history", self.console.history[-50:], True)
except (OSError, PermissionError) as e:
self.print_error("unable to write to config (directory removed?)", e)
if not self.isMaximized():
try:
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),g.width(),g.height()])
except (OSError, PermissionError) as e:
self.print_error("unable to write to wallet storage (directory removed?)", e)
# Should be no side-effects in this function relating to file access past this point.
if self.qr_window:
self.qr_window.close()
self.qr_window = None # force GC sooner rather than later.
for d in list(self._tx_dialogs):
# clean up all extant tx dialogs we opened as they hold references
# to us that will be invalidated
d.prompt_if_unsaved = False # make sure to unconditionally close
d.close()
self._close_wallet()
try: self.gui_object.timer.timeout.disconnect(self.timer_actions)
except TypeError: pass # defensive programming: this can happen if we got an exception before the timer action was connected
self.gui_object.close_window(self) # implicitly runs the hook: on_close_window
# Now, actually STOP the wallet's synchronizer and verifiers and remove
# it from the daemon. Note that its addresses will still stay
# 'subscribed' to the ElectrumX server until we connect to a new server,
# (due to ElectrumX protocol limitations).. but this is harmless.
self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
# At this point all plugins should have removed any references to this window.
# Now, just to be paranoid, do some active destruction of signal/slot connections as well as
# Removing child widgets forcefully to speed up Python's own GC of this window.
self.clean_up_connections()
self.clean_up_children()
# And finally, print when we are destroyed by C++ for debug purposes
# We must call this here as above calls disconnected all signals
# involving this widget.
destroyed_print_error(self)
def internal_plugins_dialog(self):
if self.internalpluginsdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.internalpluginsdialog.raise_()
return
d = WindowModalDialog(parent=self.top_level_window(), title=_('Optional Features'))
weakD = Weak.ref(d)
gui_object = self.gui_object
plugins = gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.get_internal_plugin_count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
weakGrid = Weak.ref(grid)
w.setLayout(grid)
settings_widgets = Weak.ValueDictionary()
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
grid = weakGrid()
d = weakD()
if d and grid and not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
if not p:
# Need to delete settings widget because keeping it around causes bugs as it points to a now-dead plugin instance
settings_widgets.pop(name)
widget.hide(); widget.setParent(None); widget.deleteLater(); widget = None
def do_toggle(weakCb, name, i):
cb = weakCb()
if cb:
p = plugins.toggle_internal_plugin(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# All plugins get this whenever one is toggled.
run_hook('init_qt', gui_object)
for i, descr in enumerate(plugins.internal_plugin_metadata.values()):
name = descr['__name__']
p = plugins.get_internal_plugin(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
weakCb = Weak.ref(cb)
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_internal_plugin_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, weakCb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.internal_plugin_metadata.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
self.internalpluginsdialog = d
d.exec_()
self.internalpluginsdialog = None # Python GC please!
def external_plugins_dialog(self):
if self.externalpluginsdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.externalpluginsdialog.raise_()
return
from . import external_plugins_window
d = external_plugins_window.ExternalPluginsDialog(self, _('Plugin Manager'))
self.externalpluginsdialog = d
d.exec_()
self.externalpluginsdialog = None # allow python to GC
def hardware_wallet_support(self):
if not sys.platform.startswith('linux'):
self.print_error("FIXME! hardware_wallet_support is Linux only!")
return
if self.hardwarewalletdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.hardwarewalletdialog.raise_()
return
from .udev_installer import InstallHardwareWalletSupportDialog
d = InstallHardwareWalletSupportDialog(self.top_level_window(), self.gui_object.plugins)
self.hardwarewalletdialog = d
d.exec_()
self.hardwarewalletdialog = None # allow python to GC
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self.top_level_window(), _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel(_('{total_size} bytes').format(total_size=total_size)), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
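# Worked example of the suggested-fee arithmetic above (numbers are illustrative,
# not from the source): with fee_per_kb() == 1000 sats/kB and total_size == 400 bytes,
# the pre-filled fee is 1000 * 400 / 1000 == 400 sats; slider-driven updates are
# additionally clamped to max_fee in on_rate() below.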
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
result = d.exec_()
d.setParent(None) # So Python can GC
if not result:
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
if new_tx is None:
self.show_error(_('CPFP no longer valid'))
return
self.show_transaction(new_tx)
def is_wallet_cashshuffle_compatible(self):
from electroncash.wallet import ImportedWalletBase, Multisig_Wallet
if (self.wallet.is_watching_only()
or self.wallet.is_hardware()
or isinstance(self.wallet, (Multisig_Wallet, ImportedWalletBase))):
# wallet is watching-only, multisig, or hardware so.. not compatible
return False
return False # display disabled (for now)
_cs_reminder_pixmap = None
def do_cash_shuffle_reminder(self):
if not self.remind_cashshuffle_enabled:
# NB: This is now disabled. We return early from this function.
# Amaury recommended we do this prompting/reminder in a future
# release after the initial public release, or we roll it out
# for a subset of users (hence this flag).
return
if self.cleaned_up or not self.wallet or not self.is_wallet_cashshuffle_compatible():
return
from electroncash_plugins.shuffle.conf_keys import ConfKeys
p = self.cashshuffle_plugin_if_loaded()
storage = self.wallet.storage
cashshuffle_flag = storage.get(ConfKeys.PerWallet.ENABLED, False)
enabled = cashshuffle_flag and p and p.is_enabled()
nagger_answer = storage.get(ConfKeys.PerWallet.MAIN_WINDOW_NAGGER_ANSWER, None)
if not enabled:
if nagger_answer is None: # nagger_answer is None if they've never said "Never ask"
if __class__._cs_reminder_pixmap is None:
# lazy init. Cache it to class level.
size = QSize(150, int(150/1.4419)) # Important to preserve aspect ratio in .svg file here
# NB: doing it this way, with a QIcon, will take into account devicePixelRatio and end up possibly producing a very hi quality image from the SVG, larger than size
__class__._cs_reminder_pixmap = QIcon(":icons/CashShuffleLogos/logo-vertical.svg").pixmap(size)
icon = __class__._cs_reminder_pixmap
message = '''
<big><b>{}</b></big>
<p>{}</p>
'''.format(_("CashShuffle is disabled for this wallet.") if not cashshuffle_flag else _("CashShuffle is disabled."),
_("Would you like to enable CashShuffle for this wallet?"))
info = ' '.join([_("If you enable it, ViLight will shuffle your coins for greater <b>privacy</b>. However, you will pay fractions of a penny per shuffle in transaction fees."),
_("(You can always toggle it later using the CashShuffle button.)")])
res, chkd = self.msg_box(icon=icon,
parent=self.top_level_window(),
title=_('Would you like to turn on CashShuffle?'),
text=message, rich_text=True, informative_text=info,
checkbox_text=_("Never ask for this wallet"),
buttons=(_('Enable CashShuffle'), _("Not now")),
defaultButton=_('Enable CashShuffle'), escapeButton=_("Not now") )
if chkd:
# they don't want to be asked again, so just remember what they answered and apply this answer each time.
storage.put(ConfKeys.PerWallet.MAIN_WINDOW_NAGGER_ANSWER, bool(res==0))
else:
# They's specified "Never ask", so apply whatever button they pushed when they said that as the auto-setting.
res = 0 if nagger_answer else 1 # if nagge_answer was True, no prompt, just auto-enable, otherwise leave it disabled.
if res == 0:
self.toggle_cashshuffle()
def restart_cashshuffle(self, msg = None, parent = None):
if (parent or self).question("{}{}".format(msg + "\n\n" if msg else "", _("Restart the CashShuffle plugin now?")),
app_modal=True):
p = self.cashshuffle_plugin_if_loaded()
if p:
p.restart_all()
self.notify(_("CashShuffle restarted"))
else:
self.notify(_("CashShuffle could not be restarted"))
_cash_shuffle_flag = 0
def cashshuffle_set_flag(self, flag):
flag = int(flag)
changed = flag != self._cash_shuffle_flag
if not changed:
return
if flag:
def onClick():
KillPopupLabel("CashShuffleError")
self.show_cashshuffle_settings()
ShowPopupLabel(name = "CashShuffleError",
text="<center><b>{}</b><br><small>{}</small></center>".format(_("Server Error"),_("Right-click to resolve")),
target=self.cashshuffle_status_button,
timeout=20000, onClick=onClick, onRightClick=onClick,
dark_mode = ColorScheme.dark_scheme)
else:
KillPopupLabel("CashShuffleError")
self.print_error("Cash Shuffle flag is now {}".format(flag))
oldTip = self.cashshuffle_status_button.statusTip()
self._cash_shuffle_flag = flag
self.update_status()
newTip = self.cashshuffle_status_button.statusTip()
if newTip != oldTip:
self.statusBar().showMessage(newTip, 7500)
def cashshuffle_get_flag(self):
return self._cash_shuffle_flag
def rebuild_history(self):
if self.gui_object.warn_if_no_network(self):
# Don't allow if offline mode.
return
msg = ' '.join([
_('This feature is intended to allow you to rebuild a wallet if it has become corrupted.'),
"\n\n"+_('Your entire transaction history will be downloaded again from the server and verified from the blockchain.'),
_('Just to be safe, back up your wallet file first!'),
"\n\n"+_("Rebuild this wallet's history now?")
])
if self.question(msg, title=_("Rebuild Wallet History")):
try:
self.wallet.rebuild_history()
except RuntimeError as e:
self.show_error(str(e))
def scan_beyond_gap(self):
if self.gui_object.warn_if_no_network(self):
return
from .scan_beyond_gap import ScanBeyondGap
d = ScanBeyondGap(self)
d.exec_()
d.setParent(None) # help along Python by dropping refct to 0
def copy_to_clipboard(self, text, tooltip=None, widget=None):
tooltip = tooltip or _("Text copied to clipboard")
widget = widget or self
qApp.clipboard().setText(text)
QToolTip.showText(QCursor.pos(), tooltip, widget)
def register_new_cash_account(self, addr = None):
''' Initiates the "Register a new cash account" dialog.
If addr is none, will use self.receive_address. '''
addr = addr or self.receive_address or self.wallet.get_receiving_address()
if not addr:
self.print_error("register_new_cash_account: no receive address specified")
return
def on_link(ignored):
webopen('https://www.vitaeid.com/')
name, placeholder = '', 'Satoshi_Nakamoto'
while True:
lh = self.wallet.get_local_height()
name = line_dialog(self, _("Register A New Vitae ID"),
(_("You are registering a new <a href='ca'>Vitae ID</a> for your address <b><pre>{address}</pre></b>").format(address=addr.to_ui_string())
+ "<<br>" + _("How it works: <a href='ca'>Vitae IDs</a> registrations work by issuing an <b>OP_RETURN</b> transaction to yourself, costing fractions of a penny. "
"You will be offered the opportunity to review the generated transaction before broadcasting it to the blockchain.")
+ "<br><br>" + _("The current block height is <b><i>{block_height}</i></b>, so the new cash account will likely look like: <b><u><i>AccountName<i>#{number}</u></b>.")
.format(block_height=lh or '???', number=max(cashacct.bh2num(lh or 0)+1, 0) or '???')
+ "<br><br>" + _("Specify the <b>account name</b> below (limited to 99 characters):") ),
_("Proceed to Send Tab"), default=name, linkActivated=on_link,
placeholder=placeholder, disallow_empty=True,
icon=QIcon(":icons/cashacct-logo.png"))
if name is None:
# user cancel
return
name = name.strip()
if not cashacct.name_accept_re.match(name):
self.show_error(_("The specified name cannot be used for a Vitae IDs registration. You must specify 1-99 alphanumeric (ASCII) characters, without spaces (underscores are permitted as well)."))
continue
self._reg_new_cash_account(name, addr)
return
def _reg_new_cash_account(self, name, addr):
self.show_send_tab()
self.do_clear()
# Enabled OP_RETURN stuff even if disabled in prefs. Next do_clear call will reset to prefs presets.
self.message_opreturn_e.setVisible(True)
self.opreturn_rawhex_cb.setVisible(True)
self.opreturn_label.setVisible(True)
# Prevent user from modifying required fields, and hide what we
# can as well.
self.message_opreturn_e.setText(cashacct.ScriptOutput.create_registration(name, addr).script[1:].hex())
self.message_opreturn_e.setFrozen(True)
self.opreturn_rawhex_cb.setChecked(True)
self.opreturn_rawhex_cb.setDisabled(True)
self.amount_e.setAmount(0)
self.amount_e.setFrozen(True)
self.max_button.setDisabled(True)
self.payto_e.setHidden(True)
self.payto_label.setHidden(True)
# Set a default description -- this we allow them to edit
self.message_e.setText(
_("Vitae IDs Registration: '{name}' -> {address}").format(
name=name, address=addr.to_ui_string()
)
)
# set up "Helpful Window" informing user registration will
# not be accepted until at least 1 confirmation.
cashaccounts_never_show_send_tab_hint = self.config.get('cashaccounts_never_show_send_tab_hint', False)
if not cashaccounts_never_show_send_tab_hint:
msg1 = (
_("The Send Tab has been filled-in with your <b>Vitae IDs</b> registration data.")
+ "<br><br>" + _("Please review the transaction, save it, and/or broadcast it at your leisure.")
)
msg2 = ( _("After at least <i>1 confirmation</i>, you will be able to use your new <b>Vitae ID</b>, and it will be visible in ViLight in the <b>Addresses</b> tab.")
)
msg3 = _("If you wish to control which specific coins are used to "
"fund this registration transaction, feel free to use the "
"Coins and/or Addresses tabs' Spend-from facility.\n\n"
"('Spend from' is a right-click menu option in either tab.)")
res = self.msg_box(
# TODO: get SVG icon..
parent = self, icon=QIcon(":icons/cashacct-logo.png").pixmap(75, 75),
title=_('Register A New Vitae ID'), rich_text=True,
text = msg1, informative_text = msg2, detail_text = msg3,
checkbox_text=_("Never show this again"), checkbox_ischecked=False
)
if res[1]:
# never ask checked
self.config.set_key('cashaccounts_never_show_send_tab_hint', True)
class TxUpdateMgr(QObject, PrintError):
''' Manages new transaction notifications and transaction verified
notifications from the network thread. It collates them and sends them to
the appropriate GUI controls in the main_window in an efficient manner. '''
def __init__(self, main_window_parent):
assert isinstance(main_window_parent, ElectrumWindow), "TxUpdateMgr must be constructed with an ElectrumWindow as its parent"
super().__init__(main_window_parent)
self.lock = threading.Lock() # used to lock thread-shared attrs below
# begin thread-shared attributes
self.notif_q = []
self.verif_q = []
self.need_process_v, self.need_process_n = False, False
# /end thread-shared attributes
self.weakParent = Weak.ref(main_window_parent)
main_window_parent.history_updated_signal.connect(self.verifs_get_and_clear, Qt.DirectConnection) # immediately clear verif_q on history update because it would be redundant to keep the verify queue around after a history list update
main_window_parent.on_timer_signal.connect(self.do_check, Qt.DirectConnection) # hook into main_window's timer_actions function
def diagnostic_name(self):
return ((self.weakParent() and self.weakParent().diagnostic_name()) or "???") + "." + __class__.__name__
def do_check(self):
''' Called from timer_actions in main_window to check if notifs or
verifs need to update the GUI.
- Checks the need_process_[v|n] flags
- If either flag is set, call the @rate_limited process_verifs
and/or process_notifs functions which update GUI parent in a
rate-limited (collated) fashion (for decent GUI responsiveness). '''
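# Producer/consumer summary: the network thread calls verif_add()/notif_add(),
# which append to the queues and set the need_process_* flags under self.lock;
# the GUI timer then calls this method, which hands off to the rate-limited
# process_verifs()/process_notifs() so all Qt work stays on the GUI thread.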
with self.lock:
bV, bN = self.need_process_v, self.need_process_n
self.need_process_v, self.need_process_n = False, False
if bV: self.process_verifs() # rate_limited call (1 per second)
if bN: self.process_notifs() # rate_limited call (1 per 15 seconds)
def verifs_get_and_clear(self):
''' Clears the verif_q. This is called from the network
thread for the 'verified2' event as well as from the below
process_verifs (GUI thread), hence the lock. '''
with self.lock:
ret = self.verif_q
self.verif_q = []
self.need_process_v = False
return ret
def notifs_get_and_clear(self):
with self.lock:
ret = self.notif_q
self.notif_q = []
self.need_process_n = False
return ret
def verif_add(self, args):
# args: [wallet, tx_hash, height, conf, timestamp]
# filter out tx's not for this wallet
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
if args[0] is parent.wallet:
with self.lock:
self.verif_q.append(args[1:])
self.need_process_v = True
def notif_add(self, args):
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
tx, wallet = args
# filter out tx's not for this wallet
if wallet is parent.wallet:
with self.lock:
self.notif_q.append(tx)
self.need_process_n = True
@rate_limited(1.0, ts_after=True)
def process_verifs(self):
''' Update history list with tx's from verifs_q, but limit the
GUI update rate to once per second. '''
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
items = self.verifs_get_and_clear()
if items:
t0 = time.time()
parent.history_list.setUpdatesEnabled(False)
had_sorting = parent.history_list.isSortingEnabled()
if had_sorting:
parent.history_list.setSortingEnabled(False)
n_updates = 0
for item in items:
did_update = parent.history_list.update_item(*item)
n_updates += 1 if did_update else 0
self.print_error("Updated {}/{} verified txs in GUI in {:0.2f} ms"
.format(n_updates, len(items), (time.time()-t0)*1e3))
if had_sorting:
parent.history_list.setSortingEnabled(True)
parent.history_list.setUpdatesEnabled(True)
parent.update_status()
@rate_limited(5.0, classlevel=True)
def process_notifs(self):
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
if parent.network:
txns = self.notifs_get_and_clear()
if txns:
# Combine the transactions
n_ok, n_cashacct, total_amount = 0, 0, 0
last_seen_ca_name = ''
ca_txs = dict() # 'txid' -> ('name', address) -- will be given to contacts_list for "unconfirmed registrations" display
for tx in txns:
if tx:
is_relevant, is_mine, v, fee = parent.wallet.get_wallet_delta(tx)
for _typ, addr, val in tx.outputs():
# Find Vitae ID registrations that are for addresses *in* this wallet
if isinstance(addr, cashacct.ScriptOutput) and parent.wallet.is_mine(addr.address):
n_cashacct += 1
last_seen_ca_name = addr.name
txid = tx.txid_fast()
if txid: ca_txs[txid] = (addr.name, addr.address)
if not is_relevant:
continue
total_amount += v
n_ok += 1
if n_cashacct:
# Unhide the Addresses tab if cash account reg tx seen
# and user never explicitly hid it.
if parent.config.get("show_addresses_tab") is None:
# We unhide it because presumably they want to SEE
# their cash accounts now that they have them --
# and part of the UI is *IN* the Addresses tab.
parent.toggle_tab(parent.addresses_tab)
# Do the same for the Contacts tab
if parent.config.get("show_contacts_tab") is None:
# We unhide it because presumably they want to SEE
# their cash accounts now that they have them --
# and part of the UI is *IN* the Contacts tab.
parent.toggle_tab(parent.contacts_tab)
if ca_txs:
# Notify contact_list of potentially unconfirmed txs
parent.contact_list.ca_update_potentially_unconfirmed_registrations(ca_txs)
if parent.wallet.storage.get('gui_notify_tx', True):
ca_text = ''
if n_cashacct > 1:
# plural
ca_text = " + " + _("{number_of_cashaccounts} Vitae IDs registrations").format(number_of_cashaccounts = n_cashacct)
elif n_cashacct == 1:
# singular
ca_text = " + " + _("1 Vitae IDs registration ({cash_accounts_name})").format(cash_accounts_name = last_seen_ca_name)
if total_amount > 0:
self.print_error("Notifying GUI %d tx"%(max(n_ok, n_cashacct)))
if max(n_ok, n_cashacct) > 1:
parent.notify(_("{} new transactions: {}")
.format(n_ok, parent.format_amount_and_units(total_amount, is_diff=True)) + ca_text)
else:
parent.notify(_("New transaction: {}").format(parent.format_amount_and_units(total_amount, is_diff=True)) + ca_text)
elif n_cashacct:
# No total amount (was just a cashacct reg tx)
ca_text = ca_text[3:] # pop off the " + "
if n_cashacct > 1:
parent.notify(_("{} new transactions: {}")
.format(n_cashacct, ca_text))
else:
parent.notify(_("New transaction: {}").format(ca_text))
|
test_InfluxDBClient.py
|
import http.server
import json
import os
import threading
import unittest
from influxdb_client import InfluxDBClient
class InfluxDBClientTest(unittest.TestCase):
def tearDown(self) -> None:
if self.client:
self.client.close()
if hasattr(self, 'httpd'):
self.httpd.shutdown()
if hasattr(self, 'httpd_thread'):
self.httpd_thread.join()
def test_TrailingSlashInUrl(self):
self.client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
self.assertEqual('http://localhost:8086', self.client.api_client.configuration.host)
self.client = InfluxDBClient(url="http://localhost:8086/", token="my-token", org="my-org")
self.assertEqual('http://localhost:8086', self.client.api_client.configuration.host)
def test_ConnectToSelfSignedServer(self):
self._start_http_server()
self.client = InfluxDBClient(f"https://localhost:{self.httpd.server_address[1]}",
token="my-token", verify_ssl=False)
health = self.client.health()
self.assertEqual(health.message, 'ready for queries and writes')
self.assertEqual(health.status, "pass")
self.assertEqual(health.name, "influxdb")
def test_certificate_file(self):
self._start_http_server()
self.client = InfluxDBClient(f"https://localhost:{self.httpd.server_address[1]}",
token="my-token", verify_ssl=True,
ssl_ca_cert=f'{os.path.dirname(__file__)}/server.pem')
health = self.client.health()
self.assertEqual(health.message, 'ready for queries and writes')
self.assertEqual(health.status, "pass")
self.assertEqual(health.name, "influxdb")
def test_init_from_file_ssl_default(self):
self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config.ini')
self.assertTrue(self.client.api_client.configuration.verify_ssl)
def test_init_from_file_ssl(self):
self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config-disabled-ssl.ini')
self.assertFalse(self.client.api_client.configuration.verify_ssl)
def test_init_from_env_ssl_default(self):
if os.getenv("INFLUXDB_V2_VERIFY_SSL"):
del os.environ["INFLUXDB_V2_VERIFY_SSL"]
self.client = InfluxDBClient.from_env_properties()
self.assertTrue(self.client.api_client.configuration.verify_ssl)
def test_init_from_env_ssl(self):
os.environ["INFLUXDB_V2_SSL_CA_CERT"] = "/my/custom/path"
self.client = InfluxDBClient.from_env_properties()
self.assertEqual("/my/custom/path", self.client.api_client.configuration.ssl_ca_cert)
def test_init_from_file_ssl_ca_cert_default(self):
self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config.ini')
self.assertIsNone(self.client.api_client.configuration.ssl_ca_cert)
def test_init_from_file_ssl_ca_cert(self):
self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config-ssl-ca-cert.ini')
self.assertEqual("/path/to/my/cert", self.client.api_client.configuration.ssl_ca_cert)
def test_init_from_env_ssl_ca_cert_default(self):
if os.getenv("INFLUXDB_V2_SSL_CA_CERT"):
del os.environ["INFLUXDB_V2_SSL_CA_CERT"]
self.client = InfluxDBClient.from_env_properties()
self.assertIsNone(self.client.api_client.configuration.ssl_ca_cert)
def test_init_from_env_ssl_ca_cert(self):
os.environ["INFLUXDB_V2_SSL_CA_CERT"] = "/my/custom/path/to/cert"
self.client = InfluxDBClient.from_env_properties()
self.assertEqual("/my/custom/path/to/cert", self.client.api_client.configuration.ssl_ca_cert)
def _start_http_server(self):
import http.server
import ssl
# Suppress warnings about unverified HTTPS requests
import urllib3
urllib3.disable_warnings()
# Configure HTTP server
self.httpd = http.server.HTTPServer(('localhost', 0), ServerWithSelfSignedSSL)
self.httpd.socket = ssl.wrap_socket(self.httpd.socket, certfile=f'{os.path.dirname(__file__)}/server.pem',
server_side=True)
# Start the server in the background
self.httpd_thread = threading.Thread(target=self.httpd.serve_forever)
self.httpd_thread.start()
class ServerWithSelfSignedSSL(http.server.SimpleHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
def do_GET(self):
self._set_headers()
response = json.dumps(
dict(name="influxdb", message="ready for queries and writes", status="pass", checks=[], version="2.0.0",
commit="abcdefgh")).encode('utf-8')
self.wfile.write(response)
|
train_ltcn.py
|
import os
import argparse
import torch
import numpy as np
import pickle
import sys
sys.path.append('./utils')
from torch import optim
from torch import nn
from torch import multiprocessing
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torch.utils.data import DataLoader, ConcatDataset
from utils.util import distance, Logger, ensure_folder, collate_fn, resize_frame
from utils.builders import SingleViewDepthTripletRCNNBuilder, SingleViewDepthTripletExtractedBuilder
from utils.vocabulary import Vocabulary
from tcn import define_model_ltcn, define_model_depth,define_model
from ipdb import set_trace
from sklearn.preprocessing import OneHotEncoder
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torchvision import transforms
from utils.plot_utils import plot_mean
IMAGE_SIZE = (299, 299)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]= "1,2"
ITERATE_OVER_TRIPLETS = 3
EXP_DIR = '/media/msieb/1e2e903d-5929-40bd-a22a-a94fd9e5bcce/tcn_data/experiments/toy/'
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--start-epoch', type=int, default=0)
parser.add_argument('--epochs', type=int, default=1000)
parser.add_argument('--save-every', type=int, default=1)
parser.add_argument('--model-folder', type=str, default=EXP_DIR + 'trained_models/ltcn')
parser.add_argument('--load-model', type=str, required=False)
# parser.add_argument('--train-directory', type=str, default='./data/multiview-pouring/train/')
# parser.add_argument('--validation-directory', type=str, default='./data/multiview-pouring/val/')
parser.add_argument('--train-directory', type=str, default=EXP_DIR + 'videos/train/')
parser.add_argument('--train-directory-depth', type=str, default=EXP_DIR + 'depth/train/')
parser.add_argument('--validation-directory', type=str, default=EXP_DIR + 'videos/valid/')
parser.add_argument('--validation-directory-depth', type=str, default=EXP_DIR + 'depth/valid/')
parser.add_argument('--minibatch-size', type=int, default=8)
parser.add_argument('--margin', type=float, default=2.0)
parser.add_argument('--model-name', type=str, default='ltcn')
parser.add_argument('--log-file', type=str, default='./out.log')
parser.add_argument('--lr-start', type=float, default=0.001)
parser.add_argument('--triplets-from-videos', type=int, default=5)
parser.add_argument('--n-views', type=int, default=3)
parser.add_argument('--alpha', type=float, default=0.001, help='weighing factor of language loss to triplet loss')
# parser.add_argument('--model_path', type=str, default='models/' , help='path for saving trained models')
# parser.add_argument('--crop_size', type=int, default=224 , help='size for randomly cropping images')
# parser.add_argument('--vocab_path', type=str, default='data/vocab.pkl', help='path for vocabulary wrapper')
# parser.add_argument('--image_dir', type=str, default='data/resized2014', help='directory for resized images')
# parser.add_argument('--caption_path', type=str, default='data/annotations/captions_train2014.json', help='path for train annotation json file')
# parser.add_argument('--log_step', type=int , default=10, help='step size for prining log info')
# parser.add_argument('--save_step', type=int , default=1000, help='step size for saving trained models')
# Model parameters
parser.add_argument('--embed_size', type=int , default=32, help='dimension of word embedding vectors')
parser.add_argument('--hidden_size', type=int , default=256, help='dimension of lstm hidden states')
parser.add_argument('--num_layers', type=int , default=1, help='number of layers in lstm')
# parser.add_argument('--num_epochs', type=int, default=5)
# parser.add_argument('--batch_size', type=int, default=128)
# parser.add_argument('--num_workers', type=int, default=2)
# parser.add_argument('--learning_rate', type=float, default=0.001)
return parser.parse_args()
args = get_args()
print(args)
builder = SingleViewDepthTripletRCNNBuilder
FULL_FRAME = False
logger = Logger(args.log_file)
def create_model(use_cuda):
tcn = define_model()
# tcn = PosNet()
if args.load_model:
model_path = os.path.join(
args.model_folder,
args.load_model
)
# map_location allows us to load models trained on cuda to cpu.
tcn.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
if use_cuda:
tcn = tcn.cuda()
return tcn
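# Doubles the minibatch size every 100 epochs (starting at 2, capped at max_size); currently unused since the DataLoader below uses args.minibatch_size directly.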
def batch_size(epoch, max_size):
exponent = epoch // 100
return min(max(2 ** (exponent), 2), max_size)
validation_builder = builder(args.n_views, args.validation_directory, args.validation_directory_depth, IMAGE_SIZE, args, sample_size=50)
validation_set = [validation_builder.build_set(full_frame=FULL_FRAME) for i in range(2)]
validation_set = ConcatDataset(validation_set)
del validation_builder
def validate(tcn, use_cuda, args):
# Run model on validation data and log results
data_loader = DataLoader(
validation_set,
batch_size=32,
shuffle=False,
pin_memory=use_cuda,
)
correct_with_margin = 0
correct_without_margin = 0
losses = []
for frames, features in data_loader:
# frames = Variable(minibatch, requires_grad=False)
if use_cuda:
frames = frames.cuda()
features = features.cuda()
anchor_frames = frames[:, 0, :, :, :]
positive_frames = frames[:, 1, :, :, :]
negative_frames = frames[:, 2, :, :, :]
anchor_features = features[:, 0, :, :, :]
positive_features = features[:, 1, :, :, :]
negative_features = features[:, 2, :, :, :]
# anchor_output, unnormalized, _ = tcn(anchor_features)
# positive_output, _, _ = tcn(positive_features)
# negative_output, _, _ = tcn(negative_features)
anchor_output, unnormalized, _ = tcn(anchor_frames)
positive_output, _, _ = tcn(positive_frames)
negative_output, _, _ = tcn(negative_frames)
d_positive = distance(anchor_output, positive_output)
d_negative = distance(anchor_output, negative_output)
assert(d_positive.size()[0] == frames.size()[0])
correct_with_margin += ((d_positive + args.margin) < d_negative).data.cpu().numpy().sum()
correct_without_margin += (d_positive < d_negative).data.cpu().numpy().sum()
loss_triplet = torch.clamp(args.margin + d_positive - d_negative, min=0.0).mean()
loss = loss_triplet
losses.append(loss.data.cpu().numpy())
loss = np.mean(losses)
logger.info('val loss: {}'.format(loss))
message = "Validation score correct with margin {with_margin}/{total} and without margin {without_margin}/{total}".format(
with_margin=correct_with_margin,
without_margin=correct_without_margin,
total=len(validation_set)
)
logger.info(message)
return correct_with_margin, correct_without_margin, loss
def model_filename(model_name, epoch):
return "{model_name}-epoch-{epoch}.pk".format(model_name=model_name, epoch=epoch)
def save_model(model, filename, model_folder):
ensure_folder(model_folder)
model_path = os.path.join(model_folder, filename)
torch.save(model.state_dict(), model_path)
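# Producer loop for the training data: runs in a background process and keeps pushing freshly built triplet datasets onto the queue for the training loop to consume.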
def build_set(queue, triplet_builder, log):
while 1:
datasets = []
for i in range(3):
dataset = triplet_builder.build_set(full_frame=FULL_FRAME)
datasets.append(dataset)
dataset = ConcatDataset(datasets)
# log.info('Created {0} triplets'.format(len(dataset)))
queue.put(dataset)
def main():
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_cuda = torch.cuda.is_available()
tcn = create_model(use_cuda)
tcn = torch.nn.DataParallel(tcn, device_ids=range(torch.cuda.device_count()))
triplet_builder = builder(args.n_views, \
args.train_directory, args.train_directory_depth, IMAGE_SIZE, args, sample_size=50)
datasets = []
# for i in range(13):
# dataset = triplet_builder.build_set()
# datasets.append(dataset)
# dataset = ConcatDataset(datasets)
queue = multiprocessing.Queue(1)
dataset_builder_process = multiprocessing.Process(target=build_set, args=(queue, triplet_builder, logger), daemon=True)
dataset_builder_process.start()
optimizer = optim.SGD(tcn.parameters(), lr=args.lr_start, momentum=0.9)
# MultiStepLR decays the learning rate by gamma=0.5 at each milestone epoch (30, 50, 100).
learning_rate_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[30, 50, 100], gamma=0.5)
criterion = nn.CrossEntropyLoss()
trn_losses_ = []
val_losses_= []
val_acc_margin_ = []
val_acc_no_margin_ = []
for epoch in range(args.start_epoch, args.start_epoch + args.epochs):
print("=" * 20)
logger.info("Starting epoch: {0} learning rate: {1}".format(epoch,
learning_rate_scheduler.get_lr()))
learning_rate_scheduler.step()
dataset = queue.get()
data_loader = DataLoader(
dataset=dataset,
batch_size=args.minibatch_size, # batch_size(epoch, args.max_minibatch_size),
shuffle=True,
pin_memory=use_cuda,
)
for _ in range(0, ITERATE_OVER_TRIPLETS):
losses = []
for frames, features in data_loader:
# frames = Variable(minibatch)
if use_cuda:
frames = frames.cuda()
features = features.cuda()
anchor_frames = frames[:, 0, :, :, :]
positive_frames = frames[:, 1, :, :, :]
negative_frames = frames[:, 2, :, :, :]
anchor_features = features[:, 0, :, :, :]
positive_features = features[:, 1, :, :, :]
negative_features = features[:, 2, :, :, :]
# anchor_output, unnormalized, _ = tcn(anchor_features)
# positive_output, _, _ = tcn(positive_features)
# negative_output, _, _ = tcn(negative_features)
anchor_output, unnormalized, _ = tcn(anchor_frames)
positive_output, _, _ = tcn(positive_frames)
negative_output, _, _ = tcn(negative_frames)
d_positive = distance(anchor_output, positive_output)
d_negative = distance(anchor_output, negative_output)
loss_triplet = torch.clamp(args.margin + d_positive - d_negative, min=0.0).mean()
loss = loss_triplet
losses.append(loss.data.cpu().numpy())
optimizer.zero_grad()
loss.backward()
optimizer.step()
trn_losses_.append(np.mean(losses))
logger.info('train loss: {}'.format(np.mean(losses)))
if epoch % 1 == 0:
acc_margin, acc_no_margin, loss = validate(tcn, use_cuda, args)
val_losses_.append(loss)
val_acc_margin_.append(acc_margin)
val_acc_no_margin_.append(acc_no_margin)
if epoch % args.save_every == 0 and epoch != 0:
logger.info('Saving model.')
save_model(tcn, model_filename(args.model_name, epoch), args.model_folder)
# plot_mean(trn_losses_, args.model_folder, 'train_loss')
# plot_mean(val_losses_, args.model_folder, 'validation_loss')
# # plot_mean(train_acc_, args.model_folder, 'train_acc')
# plot_mean(val_acc_margin_, args.model_folder, 'validation_accuracy_margin')
# plot_mean(val_acc_no_margin_, args.model_folder, 'validation_accuracy_no_margin')
if __name__ == '__main__':
main()
|
retribusiPemerintah.py
|
#!/usr/bin/python
import MySQLdb
import time
import sys
import math
import threading
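# Worker thread: scans retribusipemerintah rows with id in [bot, top] that are still unchecked (status_cek = 0), copies any matching arrears from tunggakanpemerintah into lunaspemerintah, deletes them from tunggakanpemerintah, and marks the payment row as checked.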
def check(bot, top, index):
# print 'grader'+str(index)
j = bot
db_tunggak = MySQLdb.connect("10.151.63.12", "retribusi", "retribusi", "retribusi")
cursor_tunggak = db_tunggak.cursor()
db_retribusi = MySQLdb.connect("10.151.63.12", "retribusi", "retribusi", "retribusi")
cursor_retribusi = db_retribusi.cursor()
while (bot <= top):
try:
sql_retribusi = '''select id, pelanggan_id, tgl_lunas, bulan, tahun from retribusipemerintah where status_cek = 0 and id between '''+str(bot)+''' and '''+str(top)
tanda = 0
temp = cursor_retribusi.execute(sql_retribusi)
if (temp != 0):
tanda = 1
elif (temp == 0):
print "TIDAK ADA YANG NOL "+str(index)
j+=1
sys.exit(0)
if (tanda==1):
unchecked = cursor_retribusi.fetchone()
if (unchecked is not None):
id_retribusi = unchecked[0]
no_plg_ret = unchecked[1]
tgl_lunas = unchecked[2]
bulan = unchecked[3]
tahun = unchecked[4]
sql = '''select * from tunggakanpemerintah where pelanggan_id ='''+str(no_plg_ret)+''' having min(id)'''
sql_exec = cursor_tunggak.execute(sql)
tunggak = cursor_tunggak.fetchone()
if (tunggak is not None):
id_tunggak = tunggak[0]
pelanggan_id = tunggak[1]
nama = tunggak[2]
jalan = tunggak[3]
gang = tunggak[4]
nomor = tunggak[5]
notamb = tunggak[6]
da = tunggak[7]
kd_tarif = tunggak[8]
retribusi = tunggak[9]
listrik = tunggak[10]
lbr_jalan = tunggak[11]
periode_tagih = tunggak[12]
ketstatus = tunggak[13]
insert = '''insert into lunaspemerintah (pelanggan_id, nama, jalan, gang, nomor, notamb, da, kd_tarif, retribusi, listrik, lbr_jalan, periode_tagih, ketstatus, tgl_lunas, bulan, tahun) values('''+str(pelanggan_id)+''',"'''+str(nama)+'''","'''+str(jalan)+'''","'''+str(gang)+'''","'''+str(nomor)+'''","'''+str(notamb)+'''","'''+str(da)+'''","'''+str(kd_tarif)+'''",'''+str(retribusi)+''','''+str(listrik)+''','''+str(lbr_jalan)+''','''+str(periode_tagih)+''',"'''+str(ketstatus)+'''","'''+str(tgl_lunas)+'''","'''+str(bulan)+'''","'''+str(tahun)+'''")'''
insert_exec = cursor_retribusi.execute(insert)
delete = '''delete from tunggakanpemerintah where id='''+str(id_tunggak)
delete_exec = cursor_tunggak.execute(delete)
if (insert_exec and delete_exec):
update_status = '''update retribusipemerintah set status_cek = 1 where id = '''+str(id_retribusi)
update_status_exec = cursor_retribusi.execute(update_status)
if (update_status_exec):
print 'updated '+str(id_retribusi)+''' index = '''+str(index)
else:
print 'failed to update status'
elif (not insert_exec and delete_exec):
print 'insert failed'
elif (insert_exec and not delete_exec):
print 'delete failed'
else:
update_status = '''update retribusipemerintah set status_cek = 1 where id = '''+str(id_retribusi)
update_status_exec = cursor_retribusi.execute(update_status)
print 'no arrears '+str(id_retribusi)+''' index = '''+str(index)
bot+=1
db_retribusi.commit()
except:
print 'exception caught'
bot+=1
db_retribusi.commit()
sys.exit(0)
try:
i=1
threads = []
db_tunggak = MySQLdb.connect("10.151.63.12", "retribusi", "retribusi", "retribusi")
cursor_tunggak = db_tunggak.cursor()
db_retribusi = MySQLdb.connect("10.151.63.12", "retribusi", "retribusi", "retribusi")
cursor_retribusi = db_retribusi.cursor()
sqlSum = '''select count(*) from retribusipemerintah where status_cek=0'''
sqlSum_exec = cursor_retribusi.execute(sqlSum)
sqlSumData = cursor_retribusi.fetchone()
minID = '''select min(id) from retribusipemerintah where status_cek=0'''
minID_exec = cursor_retribusi.execute(minID)
minIDData = cursor_retribusi.fetchone()
divSum = sqlSumData[0]/4
a = minIDData[0]
b = minIDData[0]
index = 1
while (a<sqlSumData[0]):
b = b + divSum
# print index
if (b > sqlSumData[0]):
b = sqlSumData[0] + 10
# print 'a = '+str(a)+' b = '+ str(b)
t = threading.Thread(target=check, args=(a,b,index,))
threads.append(t)
t.start()
index += 1
a = b
for x in threads:
x.join()
print 'about to exit'
db_tunggak.close()
db_retribusi.close()
sys.exit(0)
except KeyboardInterrupt:
sys.exit(0)
except:
print 'stopped'
# execfile(retribusi.py)
|
remote_executor.py
|
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A local proxy for a remote executor service hosted on a separate machine."""
import asyncio
import itertools
import queue
import threading
import weakref
import absl.logging as logging
import grpc
from tensorflow_federated.proto.v0 import executor_pb2
from tensorflow_federated.proto.v0 import executor_pb2_grpc
from tensorflow_federated.python.common_libs import anonymous_tuple
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import tracing
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl import executor_service_utils
from tensorflow_federated.python.core.impl.executors import execution_context
from tensorflow_federated.python.core.impl.executors import executor_base
from tensorflow_federated.python.core.impl.executors import executor_value_base
_STREAM_CLOSE_WAIT_SECONDS = 10
class RemoteValue(executor_value_base.ExecutorValue):
"""A reference to a value embedded in a remotely deployed executor service."""
def __init__(self, value_ref: executor_pb2.ValueRef, type_spec, executor):
"""Creates the value.
Args:
value_ref: An instance of `executor_pb2.ValueRef` returned by the remote
executor service.
type_spec: An instance of `computation_types.Type`.
executor: The executor that created this value.
"""
py_typecheck.check_type(value_ref, executor_pb2.ValueRef)
py_typecheck.check_type(type_spec, computation_types.Type)
py_typecheck.check_type(executor, RemoteExecutor)
self._value_ref = value_ref
self._type_signature = type_spec
self._executor = executor
# Clean up the value and the memory associated with it on the remote
# worker when no references to it remain.
def finalizer(value_ref, executor):
executor._dispose(value_ref) # pylint: disable=protected-access
weakref.finalize(self, finalizer, value_ref, executor)
@property
def type_signature(self):
return self._type_signature
@tracing.trace(span=True)
async def compute(self):
return await self._executor._compute(self._value_ref) # pylint: disable=protected-access
@property
def value_ref(self):
return self._value_ref
class _BidiStream:
"""A bidi stream connection to the Executor service's Execute method."""
def __init__(self, stub, thread_pool_executor):
self._stub = stub
self._thread_pool_executor = thread_pool_executor
self._is_initialized = False
def _lazy_init(self):
"""Lazily initialize the underlying gRPC stream."""
if self._is_initialized:
return
logging.debug('Initializing bidi stream')
self._request_queue = queue.Queue()
self._response_event_dict = {}
self._stream_closed_event = threading.Event()
def request_iter():
"""Iterator that blocks on the request Queue."""
for seq in itertools.count():
logging.debug('Request thread: blocking for next request')
val = self._request_queue.get()
if val:
py_typecheck.check_type(val[0], executor_pb2.ExecuteRequest)
py_typecheck.check_type(val[1], threading.Event)
req = val[0]
req.sequence_number = seq
logging.debug(
'Request thread: processing request of type %s, seq_no %s',
val[0].WhichOneof('request'), seq)
self._response_event_dict[seq] = val[1]
yield val[0]
else:
logging.debug(
'Request thread: Final request received. Stream will close.')
# None means we are done processing
return
response_iter = self._stub.Execute(request_iter())
def response_thread_fn():
"""Consumes response iter and exposes the value on corresponding Event."""
try:
logging.debug('Response thread: blocking for next response')
for response in response_iter:
logging.debug(
'Response thread: processing response of type %s, seq_no %s',
response.WhichOneof('response'), response.sequence_number)
# Get the corresponding response Event
response_event = self._response_event_dict[response.sequence_number]
# Attach the response as an attribute on the Event
response_event.response = response
response_event.set()
# Set the event indicating the stream has been closed
self._stream_closed_event.set()
except grpc.RpcError as error:
logging.exception('Error calling remote executor: %s', error)
response_thread = threading.Thread(target=response_thread_fn)
response_thread.daemon = True
response_thread.start()
self._is_initialized = True
@tracing.trace(span=True)
async def send_request(self, request):
"""Send a request on the bidi stream."""
self._lazy_init()
py_typecheck.check_type(request, executor_pb2.ExecuteRequest)
request_type = request.WhichOneof('request')
response_event = threading.Event()
# Enqueue a tuple of request and an Event used to return the response
self._request_queue.put((request, response_event))
await asyncio.get_event_loop().run_in_executor(self._thread_pool_executor,
response_event.wait)
response = response_event.response # pytype: disable=attribute-error
if isinstance(response, Exception):
raise response
py_typecheck.check_type(response, executor_pb2.ExecuteResponse)
response_type = response.WhichOneof('response')
if response_type != request_type:
raise ValueError('Request had type: {} but response had type: {}'.format(
request_type, response_type))
return response
def close(self):
if self._is_initialized:
logging.debug('Closing bidi stream')
self._request_queue.put(None)
# Wait for the stream to be closed
self._stream_closed_event.wait(_STREAM_CLOSE_WAIT_SECONDS)
else:
logging.debug('Closing unused bidi stream')
self._is_initialized = False
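# Wraps a unary RPC in a trace context and converts retryable gRPC errors into RetryableError so the execution context can retry the call.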
def _request(rpc_func, request):
with tracing.wrap_rpc_in_trace_context():
try:
return rpc_func(request)
except grpc.RpcError as e:
if _is_retryable_grpc_error(e):
logging.info('Received retryable gRPC error: %s', e)
raise execution_context.RetryableError(e)
else:
raise e
def _is_retryable_grpc_error(error):
"""Predicate defining what is a retryable gRPC error."""
non_retryable_errors = {
grpc.StatusCode.INVALID_ARGUMENT,
grpc.StatusCode.NOT_FOUND,
grpc.StatusCode.ALREADY_EXISTS,
grpc.StatusCode.PERMISSION_DENIED,
grpc.StatusCode.FAILED_PRECONDITION,
grpc.StatusCode.ABORTED,
grpc.StatusCode.OUT_OF_RANGE,
grpc.StatusCode.UNIMPLEMENTED,
grpc.StatusCode.DATA_LOSS,
grpc.StatusCode.UNAUTHENTICATED,
}
return (isinstance(error, grpc.RpcError) and
error.code() not in non_retryable_errors)
class RemoteExecutor(executor_base.Executor):
"""The remote executor is a local proxy for a remote executor instance."""
# TODO(b/134543154): Switch to using an asynchronous gRPC client so we don't
# have to block on all those calls.
def __init__(self,
channel,
rpc_mode='REQUEST_REPLY',
thread_pool_executor=None,
dispose_batch_size=20):
"""Creates a remote executor.
Args:
channel: An instance of `grpc.Channel` to use for communication with the
remote executor service.
rpc_mode: Optional mode of calling the remote executor. Must be either
'REQUEST_REPLY' or 'STREAMING' (defaults to 'REQUEST_REPLY'). This
option will be removed after the request-reply interface is deprecated.
thread_pool_executor: Optional concurrent.futures.Executor used to wait
for the reply to a streaming RPC message. Uses the default Executor if
not specified.
dispose_batch_size: The batch size for requests to dispose of remote
worker values. Lower values will result in more requests to the
remote worker, but will result in values being cleaned up sooner
and therefore may result in lower memory usage on the remote worker.
"""
py_typecheck.check_type(channel, grpc.Channel)
py_typecheck.check_type(rpc_mode, str)
py_typecheck.check_type(dispose_batch_size, int)
if rpc_mode not in ['REQUEST_REPLY', 'STREAMING']:
raise ValueError('Invalid rpc_mode: {}'.format(rpc_mode))
logging.debug('Creating new ExecutorStub with RPC_MODE=%s', rpc_mode)
self._stub = executor_pb2_grpc.ExecutorStub(channel)
self._bidi_stream = None
self._dispose_batch_size = dispose_batch_size
self._dispose_request = executor_pb2.DisposeRequest()
if rpc_mode == 'STREAMING':
logging.debug('Creating Bidi stream')
self._bidi_stream = _BidiStream(self._stub, thread_pool_executor)
def close(self):
if self._bidi_stream is not None:
logging.debug('Closing bidi stream')
self._bidi_stream.close()
def _dispose(self, value_ref: executor_pb2.ValueRef):
"""Disposes of the remote value stored on the worker service."""
self._dispose_request.value_ref.append(value_ref)
if len(self._dispose_request.value_ref) < self._dispose_batch_size:
return
dispose_request = self._dispose_request
self._dispose_request = executor_pb2.DisposeRequest()
if self._bidi_stream is None:
_request(self._stub.Dispose, dispose_request)
else:
send_request_fut = self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(dispose=dispose_request))
# We don't care about the response, and so don't bother to await it.
# Just start it as a task so that it runs at some point.
asyncio.get_event_loop().create_task(send_request_fut)
@tracing.trace(span=True)
async def create_value(self, value, type_spec=None):
@tracing.trace
def serialize_value():
return executor_service_utils.serialize_value(value, type_spec)
value_proto, type_spec = serialize_value()
create_value_request = executor_pb2.CreateValueRequest(value=value_proto)
if self._bidi_stream is None:
response = _request(self._stub.CreateValue, create_value_request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_value=create_value_request)
)).create_value
py_typecheck.check_type(response, executor_pb2.CreateValueResponse)
return RemoteValue(response.value_ref, type_spec, self)
@tracing.trace(span=True)
async def create_call(self, comp, arg=None):
py_typecheck.check_type(comp, RemoteValue)
py_typecheck.check_type(comp.type_signature, computation_types.FunctionType)
if arg is not None:
py_typecheck.check_type(arg, RemoteValue)
create_call_request = executor_pb2.CreateCallRequest(
function_ref=comp.value_ref,
argument_ref=(arg.value_ref if arg is not None else None))
if self._bidi_stream is None:
response = _request(self._stub.CreateCall, create_call_request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_call=create_call_request)
)).create_call
py_typecheck.check_type(response, executor_pb2.CreateCallResponse)
return RemoteValue(response.value_ref, comp.type_signature.result, self)
@tracing.trace(span=True)
async def create_tuple(self, elements):
elem = anonymous_tuple.to_elements(anonymous_tuple.from_container(elements))
proto_elem = []
type_elem = []
for k, v in elem:
py_typecheck.check_type(v, RemoteValue)
proto_elem.append(
executor_pb2.CreateTupleRequest.Element(
name=(k if k else None), value_ref=v.value_ref))
type_elem.append((k, v.type_signature) if k else v.type_signature)
result_type = computation_types.NamedTupleType(type_elem)
request = executor_pb2.CreateTupleRequest(element=proto_elem)
if self._bidi_stream is None:
response = _request(self._stub.CreateTuple, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_tuple=request))).create_tuple
py_typecheck.check_type(response, executor_pb2.CreateTupleResponse)
return RemoteValue(response.value_ref, result_type, self)
@tracing.trace(span=True)
async def create_selection(self, source, index=None, name=None):
py_typecheck.check_type(source, RemoteValue)
py_typecheck.check_type(source.type_signature,
computation_types.NamedTupleType)
if index is not None:
py_typecheck.check_type(index, int)
py_typecheck.check_none(name)
result_type = source.type_signature[index]
else:
py_typecheck.check_type(name, str)
result_type = getattr(source.type_signature, name)
request = executor_pb2.CreateSelectionRequest(
source_ref=source.value_ref, name=name, index=index)
if self._bidi_stream is None:
response = _request(self._stub.CreateSelection, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_selection=request)
)).create_selection
py_typecheck.check_type(response, executor_pb2.CreateSelectionResponse)
return RemoteValue(response.value_ref, result_type, self)
@tracing.trace(span=True)
async def _compute(self, value_ref):
py_typecheck.check_type(value_ref, executor_pb2.ValueRef)
request = executor_pb2.ComputeRequest(value_ref=value_ref)
if self._bidi_stream is None:
response = _request(self._stub.Compute, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(compute=request))).compute
py_typecheck.check_type(response, executor_pb2.ComputeResponse)
value, _ = executor_service_utils.deserialize_value(response.value)
return value
|
generate_data_2d.py
|
import os
import tensorflow as tf
from scipy import stats
import numpy as np
import math
from sklearn import preprocessing
import concurrent.futures
import logging
import threading
from multiprocessing import Process
# Duration of simulated trajectories (seconds)
T = 15
# Width and height of environment, or diameter for circular environment (meters)
L = 220
# Perimeter region distance to walls (meters)
d = 3
# Forward velocity Rayleigh distribution scale (m/sec)
forward_v_sigma = 13.02
# Rotation velocity Gaussian distribution mean (deg/sec)
mu = -0.03
# Rotation velocity Gaussian distribution standard deviation (deg/sec)
angular_v_sigma = 330.12
# Velocity reduction factor when located in the perimeter
v_reduction_factor = 0.25
# Change in angle when located in the perimeter (deg)
angle_delta = 90
# Simulation-step time increment (seconds)
dt = 0.02
# Number of place cells
N = 256
# Place cell standard deviation parameter (meters)
pc_std = 0.01
# Number of target head direction cells
M = 12
# Head direction concentration parameter
K = 20
# Gradient clipping threshold
g_c = 10**-5
# Number of trajectories used in the calculation of a stochastic gradient
minibatch_size = 10
# Number of time steps in the trajectories used for the supervised learning task
trajectory_length = 100
# Step size multiplier in the RMSProp algorithm
learning_rate = 10**-5
# Momentum parameter of the RMSProp algorithm
momentum = 0.9
# Regularisation parameter for linear layer
L2_reg = 10**-5
# Total number of gradient descent steps taken
parameter_updates = 300000
def y_rotation(vector,theta):
"""Rotates 3-D vector around y-axis"""
R = np.array([[np.cos(theta),0,np.sin(theta)],[0,1,0],[-np.sin(theta), 0, np.cos(theta)]])
return np.dot(R,vector)
def angle_3d(v1, v2):
"""The acute angle between two vectors"""
angle = np.arctan2(v2[2], v2[0]) - np.arctan2(v1[2], v1[0])
if angle > np.pi:
angle -= 2*np.pi
elif angle <= -np.pi:
angle += 2*np.pi
return angle
def min_dist_angle(position, direction):
"""Distance to the closest wall and its corresponding angle
Keyword arguments:
position -- the position (3-dimensional vector)
direction -- head direction (3-dimensional vector)
"""
# Perpendicular distance to line
# Southern Wall z = 0
s_dist = position[2]
# Northern Wall z = L
n_dist = L - position[2]
# Western Wall x = 0
w_dist = position[0]
# Eastern Wall x = L
e_dist = L - position[0]
wall_dists = [s_dist, n_dist, w_dist, e_dist]
min_pos = np.argmin(wall_dists)
dWall = wall_dists[min_pos]
west_wall = [-1, 0, 0]
north_wall = [0, 0, 1]
east_wall = [1, 0, 0]
south_wall = [0, 0, -1]
walls = [south_wall, north_wall, west_wall, east_wall]
aWall = angle_3d(direction, walls[min_pos])
return [dWall, aWall]
def normalize(vec):
return vec / np.linalg.norm(vec)
def rotation(vector,theta):
"""Rotates 2-D vector around y-axis"""
return np.array([np.cos(theta)*vector[0]-np.sin(theta)*vector[1],np.sin(theta)*vector[0]+np.cos(theta)*vector[1]])
def angle(v1, v2):
"""The acute angle between two vectors"""
angle = np.arctan2(v2[1], v2[0]) - np.arctan2(v1[1], v1[0])
# angle = angle%(2*np.pi)
if angle > np.pi:
angle -= 2*np.pi
elif angle <= -np.pi:
angle += 2*np.pi
return angle
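# Positions are sub-sampled every 8 simulation steps, so each displacement spans 8 * dt = 0.16 s; dividing the displacement norm by 0.16 converts it to speed (m/s).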
def velocity(vec):
return np.linalg.norm(vec)/0.16
def head_dir(vec):
return angle(np.array([1,0]), vec).astype('float32')
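# Sub-samples an 816-step trajectory every 8 steps (102 points) and derives the training targets: initial position, initial head direction, egocentric velocities (speed, sin/cos of angular velocity), target positions, and target head directions.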
def sample(position_matrix):
assert len(position_matrix) == 816
position_matrix = position_matrix[::8]  # keep every 8th step (102 samples)
start_pos = position_matrix[0]
diffs = np.array([position_matrix[i+1] - position_matrix[i] for i in range(101)])
trans_vels = np.apply_along_axis(velocity, 1, diffs)
hds = np.apply_along_axis(head_dir, 1, diffs)
ang_vels = np.array([hds[i+1] - hds[i] for i in range(100)])/0.16
# print(min(hds), max(hds))
# print(len(trans_vels), len(hds), len(ang_vels), len(position_matrix[1:-1]))
ego_vel = np.zeros((100, 3))
ego_vel[:,0] = trans_vels[:-1]
ego_vel[:,1] = np.sin(ang_vels)  # sin component of angular velocity (original assigned cos to both columns, presumably a typo)
ego_vel[:,2] = np.cos(ang_vels)
return [position_matrix[0],
np.array([hds[0]]),
ego_vel,
position_matrix[1:-1],
hds[1:]]
def generate_rat_trajectory(steps):
"""Generate a pseudo-random rat trajectory within a L-size square cage
steps - number of steps for the rat to take
return ->
position - (samples,3)-shaped matrix holding the 3-dim positions overtime
velocity - (samples,3)-shaped matrix holding the 3-dim velocities overtime
"""
# Initialize parameters for velocity and camera
v = 20
dirr = normalize(np.random.rand(3))
up = np.array([0, 1, 0])
dt = 0.02
norm_vec = np.array([1,0,0])
# create random velocity samples
random_turn = np.radians(np.random.normal(mu, angular_v_sigma, steps))
# print(random_turn)
random_velocity = np.random.rayleigh(forward_v_sigma, steps)
hd = np.zeros(steps+1)
hd[0] = angle_3d(norm_vec, dirr).astype('float32')
# allocate memory for x, y, and z-components of position and velocity
position_matrix = np.zeros((steps, 3))
position_matrix[0] = L*np.random.rand(3) # initialize
velocity_matrix = np.zeros((steps, 3))
for step in range(1, steps):
# computes the min distance and corresponding angle for a position
[dWall, aWall] = min_dist_angle(position_matrix[step-1], dirr)
# update speed and turn angle
if dWall<3 and np.absolute(aWall)<np.pi/2:
# print('oups')
angl = aWall/np.absolute(aWall)*(np.pi-np.absolute(aWall)) + random_turn[step]
v = v-0.25*(v) # slow down
else:
v = random_velocity[step]
angl = random_turn[step]
low = np.array([0,0,0])
high = np.array([L,L,L])
# move.
position_matrix[step] = (position_matrix[step-1] + dirr*v*dt) #np.minimum(np.maximum(position_matrix[step-1] + dirr*v*dt, low), high)
velocity_matrix[step] = (dirr*v*dt)
# turn the 3D direction vector around y-axis
dirr = y_rotation(dirr, angl*dt)
hd[step] = angle_3d(norm_vec, dirr)
# return init_pos, init_hd, ego_vel, target_pos, target_hd
return (np.delete(position_matrix,1,1)/100.0 - 1.1).astype('float32')
def filename_generator(root):
"""Generates lists of files for a given dataset version."""
basepath = 'square_room_100steps_2.2m_1000000'
base = os.path.join(root, basepath)
num_files = 100
template = '{:0%d}-of-{:0%d}.tfrecord' % (4, 4)
return [
os.path.join(base, template.format(i, num_files - 1))
for i in range(num_files)
]
filenames = filename_generator('./my_datasets')
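# Wraps a list of floats in a tf.train.Feature so it can be serialized into a TFRecord Example.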
def _float32_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
records_per_file = 10000
def write_record(filename):
print('writing record', filename)
tfrecord_writer = tf.io.TFRecordWriter(filename)
data = [sample(generate_rat_trajectory(816)) for _ in range(records_per_file)]
for index in range(records_per_file):
# 1. Convert your data into tf.train.Feature
feature = {
'init_pos': _float32_feature(data[index][0]),
'init_hd': _float32_feature(data[index][1]),
'ego_vel': _float32_feature([val for row in data[index][2] for val in row]), # flatten
'target_pos': _float32_feature([val for row in data[index][3] for val in row]), # flatten
'target_hd': _float32_feature(data[index][4])  # already 1-D, no flattening needed
}
# 2. Create a tf.train.Features
features = tf.train.Features(feature=feature)
# 3. Create an Example protocol buffer
example = tf.train.Example(features=features)
# 4. Serialize the Example to string
example_to_string = example.SerializeToString()
# 5. Write to TFRecord
tfrecord_writer.write(example_to_string)
def write_records(filenames):
for filename in filenames:
write_record(filename)
if __name__ == "__main__":
# format = "%(asctime)s: %(message)s"
# logging.basicConfig(format=format, level=logging.INFO,
# datefmt="%H:%M:%S")
# with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
# executor.map(write_record, filenames)
# for filename in filenames:
# write_record(filename)
coord = tf.train.Coordinator()
processes = []
args = np.array_split(filenames, 4)
for thread_index in range(4):
p = Process(target=write_records, args=[args[thread_index]])
p.start()
processes.append(p)
coord.join(processes)
|
cli.py
|
import errno
import json
import logging
import os
import re
import sys
import tarfile
from datetime import datetime
from glob import glob
from shutil import copyfile
from threading import Thread
import yaml
import fnmatch
import click
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError
import gdal
import wget
# from plio.io.io_gdal import GeoDataset
from pathlib import Path
from .. import api
from .. import ingest
from .. import utils
from .. import sql
from .. import pysbatch
from bayleef import config
from bayleef import config_file
from collections import OrderedDict
from sys import stdin
from os import isatty
import subprocess
LOG_FORMAT = '%(name)s::%(asctime)-15s::%(levelname)s || %(message)s'
logging.basicConfig(format=LOG_FORMAT)
logger = logging.getLogger('Bayleef')
logger.setLevel(logging.DEBUG)
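# Looks up the default USGS node (CWIC, EE, HDDS, LPVS) for a dataset from the bundled datasets.json when no node is supplied explicitly.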
def get_node(dataset, node=None):
if node is None:
cur_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(cur_dir, "..", "data")
dataset_path = os.path.join(data_dir, "datasets.json")
with open(dataset_path, "r") as f:
datasets = json.loads(f.read())
node = datasets[dataset].upper()
return node
def to_coordinates(bounds):
xmin, ymin, xmax, ymax = bounds
return [[
[xmin, ymin],
[xmin, ymax],
[xmax, ymax],
[xmax, ymin],
[xmin, ymin]
]]
def to_geojson_feature(entry):
# TODO: This key may not be present in all datasets.
bounds = list(map(float, entry.pop("sceneBounds").split(',')))
coordinates = to_coordinates(bounds)
return {
"type": "Feature",
"properties": entry,
"geometry": {
"type": "Polygon",
"coordinates": coordinates
}
}
def to_geojson(result):
gj = {
'type': 'FeatureCollection'
}
if type(result['data']) is list:
features = list(map(to_geojson_feature, result['data']))
else:
features = list(map(to_geojson_feature, result['data']['results']))
for key in result['data']:
if key == "results":
continue
gj[key] = result['data'][key]
gj['features'] = features
for key in result:
if key == "data":
continue
gj[key] = result[key]
return gj
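# Recursively yields every (x, y) coordinate pair from an arbitrarily nested GeoJSON coordinate array.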
def explode(coords):
for e in coords:
if isinstance(e, (float, int)):
yield coords
break
else:
for f in explode(e):
yield f
def get_bbox(f):
x, y = zip(*list(explode(f['geometry']['coordinates'])))
return min(x), min(y), max(x), max(y)
api_key_opt = click.option("--api-key", help="API key returned from USGS servers after logging in.", default=None)
node_opt = click.option("--node", help="The node corresponding to the dataset (CWIC, EE, HDDS, LPVS).", default=None)
@click.group()
def bayleef():
pass
@click.command()
@click.argument("username", envvar='USGS_USERNAME')
@click.argument("password", envvar='USGS_PASSWORD')
def login(username, password):
"""
Login to the USGS EROs service.
"""
api_key = api.login(username, password)
click.echo(api_key)
@click.command()
def logout():
click.echo(api.logout())
@click.command()
@click.argument("node")
@click.option("--start-date", help="Start date for when a scene has been acquired. In the format of yyyy-mm-dd")
@click.option("--end-date", help="End date for when a scene has been acquired. In the format of yyyy-mm-dd")
def datasets(node, start_date, end_date):
data = api.datasets(None, node, start_date=start_date, end_date=end_date)
click.echo(json.dumps(data))
@click.command()
@click.argument("dataset")
@click.argument("scene-ids", nargs=-1)
@node_opt
@click.option("--extended", is_flag=True, help="Probe for more metadata.")
@click.option('--geojson', is_flag=True)
@api_key_opt
def metadata(dataset, scene_ids, node, extended, geojson, api_key):
"""
Request metadata.
"""
if len(scene_ids) == 0:
scene_ids = map(lambda s: s.strip(), click.open_file('-').readlines())
node = get_node(dataset, node)
result = api.metadata(dataset, node, scene_ids, extended=extended, api_key=api_key)
if geojson:
result = to_geojson(result)
click.echo(json.dumps(result))
@click.command()
@click.argument("dataset")
@node_opt
def dataset_fields(dataset, node):
node = get_node(dataset, node)
data = api.dataset_fields(dataset, node)
click.echo(json.dumps(data))
@click.command()
@click.argument("dataset")
@node_opt
@click.argument("aoi", default="-", required=False)
@click.option("--start-date", help="Start date for when a scene has been acquired. In the format of yyyy-mm-dd")
@click.option("--end-date", help="End date for when a scene has been acquired. In the format of yyyy-mm-dd")
@click.option("--lng", help="Longitude")
@click.option("--lat", help="Latitude")
@click.option("--dist", help="Radius - in units of meters - used to search around the specified longitude/latitude.", default=100)
@click.option("--lower-left", nargs=2, help="Longitude/latitude specifying the lower left of the search window")
@click.option("--upper-right", nargs=2, help="Longitude/latitude specifying the lower left of the search window")
@click.option("--where", nargs=2, multiple=True, help="Supply additional search criteria.")
@click.option('--geojson', is_flag=True)
@click.option("--extended", is_flag=True, help="Probe for more metadata.")
@api_key_opt
def search(dataset, node, aoi, start_date, end_date, lng, lat, dist, lower_left, upper_right, where, geojson, extended, api_key):
"""
Search for images.
"""
node = get_node(dataset, node)
if aoi == "-":
src = click.open_file('-')
if not src.isatty():
lines = src.readlines()
if len(lines) > 0:
aoi = json.loads(''.join([ line.strip() for line in lines ]))
bbox = list(map(get_bbox, aoi.get('features') or [aoi]))[0]
lower_left = bbox[0:2]
upper_right = bbox[2:4]
if where:
# Query the dataset fields endpoint for queryable fields
resp = api.dataset_fields(dataset, node)
def format_fieldname(s):
return ''.join(c for c in s if c.isalnum()).lower()
field_lut = { format_fieldname(field['name']): field['fieldId'] for field in resp['data'] }
where = { field_lut[format_fieldname(k)]: v for k, v in where if format_fieldname(k) in field_lut }
if lower_left:
lower_left = dict(zip(['longitude', 'latitude'], lower_left))
upper_right = dict(zip(['longitude', 'latitude'], upper_right))
result = api.search(dataset, node, lat=lat, lng=lng, distance=dist, ll=lower_left, ur=upper_right, start_date=start_date, end_date=end_date, where=where, extended=extended, api_key=api_key)
if geojson:
result = to_geojson(result)
print(json.dumps(result))
@click.command()
@click.argument("dataset")
@click.argument("scene-ids", nargs=-1)
@node_opt
@api_key_opt
def download_options(dataset, scene_ids, node, api_key):
node = get_node(dataset, node)
data = api.download_options(dataset, node, scene_ids)
print(json.dumps(data))
@click.command()
@click.argument("dataset")
@click.argument("scene_ids", nargs=-1)
@click.option("--product", nargs=1, required=True)
@node_opt
@api_key_opt
def download_url(dataset, scene_ids, product, node, api_key):
node = get_node(dataset, node)
data = api.download(dataset, node, scene_ids, product)
click.echo(json.dumps(data))
@click.command()
@click.argument("root")
@node_opt
def batch_download(root, node):
"""
Download from search result.
"""
def download_from_result(scene, root):
scene_id = scene['entityId']
temp_file = '{}.tar.gz'.format(scene_id)
dataset = re.findall(r'dataset_name=[A-Z0-9_]*', scene['orderUrl'])[0]
dataset = dataset.split('=')[1]
path = utils.get_path(scene, root, dataset)
if os.path.exists(path):
logger.warning('{} already in cache, skipping'.format(path))
return
download_info = api.download(dataset, get_node(dataset, node), scene_id)
download_url = download_info['data'][0]
logger.info('Downloading: {} from {}'.format(scene_id, download_url))
wget.download(download_url, temp_file)
print()
logger.info('Extracting to {}'.format(path))
tar = tarfile.open(temp_file)
tar.extractall(path=path)
tar.close()
logger.info('Removing {}'.format(temp_file))
os.remove(temp_file)
logger.info('{} complete'.format(scene_id))
# read the response piped in on stdin
resp = ""
for line in sys.stdin:
resp += line
# convert string into dict
resp = json.loads(resp)
nfiles = resp['data']['numberReturned']
logger.info("Number of files: {}".format(nfiles))
results = resp['data']['results']
for i, result in enumerate(results):
# Run downloads as threads so that keyboard interrupts are
# deferred until the download is complete
logger.info('{}/{}'.format(i, nfiles))
job = Thread(target=download_from_result, args=(result, root))
job.start()
job.join()
@click.command()
@click.argument("dataset")
@click.argument("root")
@click.argument("db")
@click.option("--host", default="localhost", help="host id")
@click.option("--port", default="5432", help="port number")
@click.option("--user", help="username for database")
@click.option("--password", help="password for database")
def to_sql(db, dataset, root, host, port, user, password):
"""
Upload the dataset to a database
"""
dataset_root = os.path.join(root, dataset)
# The dirs with important files will always be in leaf nodes
leef_dirs = list()
for root, dirs, files in os.walk(dataset_root):
if "images" in dirs and "original" in dirs and "metadata.json" in files and "index.json" in files:
leef_dirs.append(root)
logger.info("{} Folders found".format(len(leef_dirs)))
# only support postgres for now
engine = create_engine('postgresql://{}:{}@{}:{}/{}'.format(user,password,host,port,db))
try:
sql.serial_upload(dataset, leef_dirs, engine)
except:
for dir in leef_dirs:
logger.info("Uploading {}".format(dir))
try:
sql.func_map[dataset](dir, engine)
except Exception as e:
logger.error("ERROR: {}".format(e))
import traceback
traceback.print_exc()
@click.command(context_settings=dict(
ignore_unknown_options=True,
allow_extra_args=True,
))
@click.argument("input", required=True)
@click.argument("bayleef_data", required=True)
@click.option("--add-option", "-ao", default='', help="Text containing misc. sbatch parameters")
@click.option("--log", "-l", default='.', help="Log output directory, default is redirected to /dev/null")
@click.option("--mem", '-m', default='4', help="Memory per job in gigabytes. Default = 4")
@click.option("--time", "-t", default='01:00:00', help="Max time per job, default = one hour.")
@click.option("--njobs", "-n", default=-1, help="Max number of conccurent jobs, -1 for unlimited. Default = -1")
def sbatch_master(input, bayleef_data, add_option, njobs, **options):
"""
Run the load-master command as sbatch jobs. It is strongly recommended that this is run directly on
the slurm master.
"""
if not os.path.exists(options['log']):
raise Exception('Log directory {} is not a directory or does not exist'.format(options['log']))
if not os.path.exists(bayleef_data):
raise Exception('Bayleef data directory {} is not a directory or does not exist'.format(bayleef_data))
files = glob(input+'/**/*.hdf', recursive=True)
logger.info("sbatch options: log={log} mem={mem} time={time} njobs={njobs}".format(**options, njobs=njobs))
logger.info("other options: {}".format(add_option if add_option else None))
for i, file in enumerate(files):
command = "bayleef load-master '{}' '{}'".format(file, bayleef_data)
job_name = 'bayleef_{}_{}'.format(i, os.path.splitext(os.path.basename(file))[0] )
log_file = os.path.join(options['log'], job_name+'.log')
logger.info("{}/{}".format(i, len(files)))
logger.info("Dispatching {}".format(command))
logger.info('Jobname: {}'.format(job_name))
logger.info('Log File: {}'.format(log_file))
out = pysbatch.sbatch(wrap=command, mem=options['mem'], log=log_file, time=options['time'], job_name=job_name, add_option=add_option)
logger.info(out)
if njobs != -1:
pysbatch.limit_jobs(njobs)
@click.command()
@click.argument("input", required=True)
@click.argument("--bayleef_data", "-d", default=config.data)
@click.option("-r", is_flag=True, help="Set to recursively glob .HDF files (Warning: Every .HDF file under the directory will be treated as a Master file)")
def load_master(input, bayleef_data, r):
"""
Load master data.
parameters
----------
input : str
root directory containing master files, .HDFs are recursively globbed.
bayleef_data : str
root of the bayleef data directory
"""
files = input
if not r: # if not recursive
files = [input]
else:
files = glob(input+'/**/*.hdf', recursive=True)
total = len(files)
logger.info("{} Files Found".format(total))
for i, file in enumerate(files):
logger.info('{}/{} ({}) - Processing {}'.format(i, total, round(i/total, 2), file))
ingest.master(bayleef_data, file)
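# Dispatches each step's commands and waits for all jobs in a step to finish before moving on (the actual sbatch submission below is currently commented out).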
def batch_jobs(jobs, log=".", njobs=-1, **sbatch_kwargs):
logger.info("Jobs:")
utils.print_dict(jobs)
if isinstance(jobs, list):
jobs = OrderedDict({"step1" : jobs})
for step in jobs:
joblist = []
commands = jobs[step]
logger.info("Running {} jobs for {}".format(len(commands), step))
for i, command in enumerate(commands):
print(command)
# job_name = 'bayleef_{}_{}'.format("".join(step.split()), i)
# joblist.append(job_name)
# log_file = os.path.join(log, job_name+'.log')
#
# logger.info("{} {}/{}".format(step, i+1, len(commands)))
# logger.info("Dispatching {}".format(command))
# logger.info('Jobname: {}'.format(job_name))
# logger.info('Log File: {}'.format(log_file))
# out = pysbatch.sbatch(wrap=command, job_name=job_name, log=log_file, **sbatch_kwargs)
# logger.info(out.lstrip().rstrip())
# if njobs != -1:
# pysbatch.limit_jobs(njobs)
logger.info("Waiting for jobs in {} to complete.".format(step))
pysbatch.wait_for_jobs(joblist)
@click.command()
@click.argument("id1", required=False)
@click.argument("id2", required=False)
@click.option("--file", "-f", default=None)
@click.option("--log", "-l", default='.', help="Log output directory, default is current working directory.")
@click.option("--mem", '-m', default='4', help="Memory per job in gigabytes. Default = 4")
@click.option("--time", "-t", default='01:00:00', help="Max time per job, default = one hour.")
@click.option("--njobs", "-n", default=-1, help="Max number of conccurent jobs, -1 for unlimited. Default = -1")
@click.option("--bayleef_data", "-d", default=config.data)
def themis_pairs(id1, id2, file, log, mem, time, njobs, bayleef_data):
from bayleef.ingest import themis_pairs
if file:
pairs = open(file).read()
pairs = pairs.split("\n")
commands = {"themis_pairs" : ["bayleef themis-pairs -d {} {}".format(bayleef_data, pair) for pair in pairs]}
batch_jobs(commands, log=log, mem=mem, time=time, njobs=njobs)
else:
if not id1 or not id2:
logger.error("Invalid IDs: {} {}".format(id1, id2))
exit(0)
ingest.themis_pairs(bayleef_data, id1, id2)
@click.command()
def config_call():
logger.info("Config file located in: {}".format(config_file))
utils.print_dict(config)
@click.command(context_settings=dict(
ignore_unknown_options=True,
allow_extra_args=True,
))
@click.argument("job_file", required=False, default=None)
@click.option("--add-option", "-ao", default='', help="Text containing misc. sbatch parameters")
@click.option("--log", "-l", default='.', help="Log output directory, default is current working directory.")
@click.option("--mem", '-m', default='4', help="Memory per job in gigabytes. Default = 4")
@click.option("--time", "-t", default='01:00:00', help="Max time per job, default = one hour.")
@click.option("--njobs", "-n", default=-1, help="Max number of conccurent jobs, -1 for unlimited. Default = -1")
def agility(job_file, add_option, njobs, time, mem, log, **options):
is_pipe = not isatty(stdin.fileno())
if is_pipe:
# read the response piped in on stdin
pipestr = ""
for line in sys.stdin:
pipestr += line
if not is_pipe and not job_file:
logger.error("No Valid input. Job File: {}".format(job_file))
exit(1)
if not os.path.exists(log):
raise Exception('Log directory {} is not a directory or does not exist'.format(log))
if is_pipe:
try:
jobs = yaml.load(pipestr, yaml.SafeLoader)
except Exception as e:
logger.error("Not Valid Json\n{}".format(pipestr))
exit(1)
else:
try:
jobs = yaml.load(open(job_file), yaml.SafeLoader)
except Exception as e:
logger.error("Cannot open {} for reading.".format(job_file))
exit(1)
batch_jobs(jobs, log=log, mem=mem, time=time, njobs=njobs)
bayleef.add_command(agility, "agility")
bayleef.add_command(agility, "sbatch")
bayleef.add_command(config_call, "config")
bayleef.add_command(themis_pairs, "themis-pairs")
bayleef.add_command(to_sql, "to-sql")
bayleef.add_command(login)
bayleef.add_command(logout)
bayleef.add_command(datasets)
bayleef.add_command(dataset_fields, "dataset-fields")
bayleef.add_command(metadata)
bayleef.add_command(search)
bayleef.add_command(download_options, "download-options")
bayleef.add_command(download_url, "download-url")
bayleef.add_command(batch_download, "download")
bayleef.add_command(load_master, "load-master")
bayleef.add_command(sbatch_master, "sbatch-master")
|
__init__.py
|
import json
import logging
import threading
import time
import uuid
from contextlib import contextmanager
from typing import Callable
from typing import List
from typing import Union
import pika
from flask import current_app
from flask import Flask
from retry import retry
from .consumer import ReconnectingConsumer
from .encoder import UUIDEncoder
from .exceptions import ExchangeTypeError
from .exceptions import SyncTimeoutError
from .exchange import ExchangeType
__version__ = "1.1.4"
def get_state(app):
"""Gets the state for the application"""
assert "coney" in app.extensions, (
"The coney extension was not registered to the current "
"application. Please make sure to call init_app() first."
)
return app.extensions["coney"]
class _ConeyState:
"""Remembers configuration for the (coney, app) tuple."""
def __init__(self, coney):
self.coney = coney
self.consumer_threads = []
self.data = {}
class Coney:
"""
This class is used to control the Coney integration to one or more Flask
applications. Depending on how you initialize the object it is usable right
away or will attach as needed to a Flask application.
There are two usage modes which work very similarly. One is binding the
instance to a very specific Flask application::
app = Flask(__name__)
coney = Coney(app)
The second possibility is to create the object once and configure the
application later to support it::
coney = Coney()
def create_app():
app = Flask(__name__)
coney.init_app(app)
return app
To listen on a queue use::
coney = Coney(app)
@coney.queue(queue_name="test")
def queue_test(ch, method, props, body):
pass
To publish a message use::
coney = Coney(app)
coney.publish({"test": 1})
:param app: A flask app
:param testing: Setup testing mode. This will not invoke threads
"""
def __init__(self, app: Flask = None, testing: bool = False):
self.app = app
self.thread = None
self.testing = testing
if app is not None:
self.init_app(app)
def init_app(self, app: Flask):
"""
This callback can be used to initialize an application for the use
with Coney.
"""
# We intentionally don't set self.app = app, to support multiple
# applications. If the app is passed in the constructor,
# we set it and don't support multiple applications.
# noqa: B950 CONEY_URI: see https://pika.readthedocs.io/en/stable/modules/parameters.html#pika.connection.URLParameters
if not (app.config.get("CONEY_BROKER_URI")):
raise RuntimeError("CONEY_BROKER_URI needs to be set")
self.broker_uri = app.config.get("CONEY_BROKER_URI")
app.extensions["coney"] = _ConeyState(self)
def get_app(self, reference_app: Flask = None):
"""
Helper method that implements the logic to look up an application.
:param reference_app: A flask app
"""
if reference_app is not None:
return reference_app
if current_app:
return current_app._get_current_object()
if self.app is not None:
return self.app
raise RuntimeError(
"No application found. Either work inside a view function or push"
" an application context. See"
" http://mikebarkmin.github.io/flask-coney/contexts/."
)
@contextmanager
def channel(self, app: Flask = None) -> pika.channel.Channel:
"""Provides context for a channel.
Example::
with channel(app) as ch:
ch.basic_publish()
:param app: A flask app
"""
with self.connection() as c:
yield c.channel()
@contextmanager
@retry(pika.exceptions.AMQPConnectionError, tries=4, delay=1, jitter=3)
def connection(self, app: Flask = None) -> pika.BlockingConnection:
"""Provides context for a connection.
Example::
with connection(app) as c:
c.channel()
:param app: A flask app
"""
app = self.get_app(app)
params = pika.URLParameters(self.broker_uri)
connection = pika.BlockingConnection(params)
try:
yield connection
finally:
connection.close()
def queue(
self,
queue_name: str = "",
exchange_name: str = "",
exchange_type: ExchangeType = ExchangeType.DIRECT,
routing_key: str = None,
routing_keys: List[str] = None,
app: Flask = None,
) -> Callable:
"""
A decorator for consuming a queue. A thread will start in the
background, if no other thread for this purpose was already started.
There will only be one thread for every queue.
Example::
@coney.queue(queue_name="test")
def queue_test(ch, method, props, body):
pass
You can also bind the queue to multiple routing keys::
@coney.queue(routing_keys=["first", "second"])
def queue_multiple(ch, method, props, body):
pass
If routing_keys and a routing_key is provided, they will be
combined.
:param type: ExchangeType
:param queue_name: Name of the queue
:param exchange_name: Name of the exchange
:param exchange_type: Type of the exchange
:param routing_key: The routing key
:param routing_keys: A list of routing keys
:param app: A flask app
"""
app = self.get_app(app)
state = get_state(app)
if not routing_keys:
routing_keys = []
if (
exchange_type == ExchangeType.FANOUT
or exchange_type == ExchangeType.DIRECT
or exchange_type == ExchangeType.TOPIC
or exchange_type == ExchangeType.HEADERS
):
if exchange_name == "" and routing_key and routing_key != queue_name:
# on the default exchange the queue is automatically bound to queue_name
raise RuntimeError(
"""Routing key mismatch.
Queues on default exchange should
not have a routing key."""
)
else:
raise ExchangeTypeError(f"Exchange type {exchange_type} is not supported")
def decorator(func):
consumer = ReconnectingConsumer(
self.broker_uri,
exchange=exchange_name,
exchange_type=exchange_type,
queue=queue_name,
routing_keys=routing_keys + [routing_key],
on_message=func,
)
thread = threading.Thread(target=consumer.run)
state.consumer_threads.append((consumer, thread))
thread.start()
return func
return decorator
def _accept(self, corr_id: str, result: str, app: Flask = None):
app = self.get_app(app)
data = get_state(app).data
data[corr_id]["is_accept"] = True
data[corr_id]["result"] = result
with self.channel(app) as channel:
channel.queue_delete(data[corr_id]["reply_queue_name"])
def _on_response(
self,
ch: pika.channel.Channel,
method: pika.spec.Basic.Deliver,
props: pika.spec.BasicProperties,
body: str,
app=None,
):
logging.info(f"on response => {body}")
corr_id = props.correlation_id
if props.content_type == "application/json":
body = json.loads(body)
self._accept(corr_id, body, app=app)
def publish(
self,
body: Union[str, dict],
exchange_name: str = "",
routing_key: str = "",
durable: bool = False,
properties: dict = None,
app: Flask = None,
):
"""
Will publish a message
Example::
@app.route('/process'):
def process():
coney.publish({"text": "process me"})
        :param body: Body of the message, either a string or a dict
        :param exchange_name: The exchange
        :param routing_key: The routing key
        :param durable: Should the exchange be durable
        :param properties: see :py:class:`pika.spec.BasicProperties`
        :param app: A flask app
"""
with self.channel(app) as channel:
if properties is None:
properties = {"content_type": "text/plain"}
if isinstance(body, dict):
body = json.dumps(body, cls=UUIDEncoder)
properties["content_type"] = "application/json"
channel.basic_publish(
exchange=exchange_name,
routing_key=routing_key,
body=body,
properties=pika.BasicProperties(**properties),
)
def reply_sync(
self,
ch: pika.channel.Channel,
method: pika.spec.Basic.Deliver,
properties: pika.spec.BasicProperties,
body: str,
app=None,
):
"""
        Will reply to a message which was sent by :meth:`publish_sync`
Example::
@queue(queue_name="rpc")
def concat_callback(ch, method, props, body):
result = body["a"] + body["b"]
body = {"result": result}
coney.reply_sync(ch, method, props, body)
        This is a convenience shorthand for::
@queue(queue_name="rpc")
def concat_callback(ch, method, props, body):
result = body["a"] + body["b"]
body = {"result": result}
self.publish(
body,
routing_key=properties.reply_to,
properties={"correlation_id": properties.correlation_id},
app=app,
)
        :param ch: The channel on which the original message arrived
        :param method: Delivery information of the original message
        :param properties: Properties of the original message (supplies
            ``reply_to`` and ``correlation_id``)
        :param body: The message to send as the reply
"""
self.publish(
body,
routing_key=properties.reply_to,
properties={"correlation_id": properties.correlation_id},
app=app,
)
ch.basic_ack(delivery_tag=method.delivery_tag)
def publish_sync(
self,
body: Union[str, dict],
exchange_name: str = "",
routing_key: str = "",
properties: dict = None,
timeout: float = 10,
app: Flask = None,
):
"""
Will publish a message and wait for the response
Example::
# client
@app.route('/concat')
def concat():
a = request.args.get('a')
b = request.args.get('b')
body = {'a': a, 'b': b}
result = coney.publish_sync(body, routing_key="rpc")
return result
# server
@queue(queue_name="rpc")
def concat_callback(ch, method, props, body):
result = body["a"] + body["b"]
body = {"result": result}
coney.reply_sync(ch, method, props, body)
:param body: Body of the message, either a string or a dict
:param exchange_name: The exchange
:param routing_key: The routing key
:param properties: see :py:class:`pika.spec.BasicProperties`
:param timeout: Timeout in seconds
:param app: A flask app
:raises:
            SyncTimeoutError: if no response is received within ``timeout`` seconds
"""
app = self.get_app(app)
with self.connection(app) as connection:
corr_id = str(uuid.uuid4())
channel = connection.channel()
result = channel.queue_declare(queue="", exclusive=False, auto_delete=True)
callback_queue = result.method.queue
state = get_state(app)
state.data[corr_id] = {
"is_accept": False,
"result": None,
"reply_queue_name": callback_queue,
}
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback_queue, self._on_response)
if properties is None:
properties = {"content_type": "text/plain"}
if isinstance(body, dict):
body = json.dumps(body, cls=UUIDEncoder)
properties["content_type"] = "application/json"
channel.basic_publish(
exchange="",
routing_key=routing_key,
body=body,
properties=pika.BasicProperties(
**properties, reply_to=callback_queue, correlation_id=corr_id,
),
)
end = time.time() + timeout
while time.time() < end:
if state.data[corr_id]["is_accept"]:
logging.info("Got the RPC server response")
return state.data[corr_id]["result"]
else:
connection.process_data_events()
time.sleep(0.3)
raise SyncTimeoutError()
|
not_a_dam_fetcher.py
|
"""Flask app to generate imagery that does not contain a dam."""
import shutil
import subprocess
import requests
import queue
import json
import datetime
import sqlite3
import os
import sys
import logging
import threading
import pygeoprocessing
from retrying import retry
import numpy
import taskgraph
import shapely.wkt
import shapely.geometry
from osgeo import gdal
from osgeo import osr
import flask
from flask import Flask
LOGGER = logging.getLogger(__name__)
VALIDATION_WORKER_DIED = False
logging.basicConfig(
level=logging.DEBUG,
format=(
'%(asctime)s (%(relativeCreated)d) %(levelname)s %(name)s'
' [%(funcName)s:%(lineno)d] %(message)s'),
stream=sys.stdout)
logging.getLogger('taskgraph').setLevel(logging.INFO)
APP = Flask(__name__, static_url_path='', static_folder='')
APP.config['SECRET_KEY'] = b'\xe2\xa9\xd2\x82\xd5r\xef\xdb\xffK\x97\xcfM\xa2WH'
WORKSPACE_DIR = 'workspace_not_a_dam'
PLANET_QUADS_DIR = os.path.join(WORKSPACE_DIR, 'planet_quads')
NOT_DAM_IMAGERY_DIR = os.path.join(WORKSPACE_DIR, 'not_dam_images')
GSW_DIR = os.path.join(WORKSPACE_DIR, 'gsw_tiles')
PLANET_STITCHED_IMAGERY_DIR = os.path.join(PLANET_QUADS_DIR, 'stitched_images')
DATABASE_PATH = os.path.join(WORKSPACE_DIR, 'not_a_dam.db')
DAM_STATUS_DB_PATH = os.path.join(WORKSPACE_DIR, 'dam_status.db')
PLANET_API_KEY_FILE = 'planet_api_key.txt'
ACTIVE_MOSAIC_JSON_PATH = os.path.join(WORKSPACE_DIR, 'active_mosaic.json')
N_WORKERS = -1
REQUEST_TIMEOUT = 1.0
REPORTING_INTERVAL = 5.0
NOT_A_DAM_IMAGES_TO_CACHE = 10
MAX_GSW_TRIES = 4096
BOUNDING_BOX_SIZE_M = 2000.0
PLANET_QUAD_CELL_SIZE = 4.77731
MIN_SURFACE_WATER = 20
MAX_SURFACE_WATER = 80
@APP.route('/favicon.ico')
def favicon():
return flask.send_from_directory(
os.path.join(APP.root_path, 'images'), 'favicon.ico',
mimetype='image/vnd.microsoft.icon')
@APP.route('/')
def get_unvalidated_image():
try:
#connection = get_db_connection()
#cursor = connection.cursor()
image_url = get_unprocessed_image_path()
LOGGER.debug(image_url)
return flask.render_template(
'not_a_dam_validation.html', **{
'image_url': image_url
})
except:
LOGGER.exception('something bad happened')
@APP.route('/update_is_a_dam', methods=['POST'])
def update_is_a_dam():
"""Called when there is a dam image that's classified."""
payload = json.loads(flask.request.data.decode('utf-8'))
LOGGER.debug(payload)
connection = get_db_connection()
cursor = connection.cursor()
cursor.execute(
"UPDATE base_table "
"SET dam_in_image = ? "
"WHERE image_path = ?",
(payload['dam_in_image'], os.path.normpath(payload['image_url'])))
cursor.close()
connection.commit()
return flask.jsonify({'image_url': get_unprocessed_image_path()})
@APP.route('/summary')
def render_summary():
"""Get a point that has not been validated."""
connection = get_db_connection()
cursor = connection.cursor()
cursor.execute(
"SELECT count(1) "
"FROM base_table "
"WHERE dam_in_image is NULL;")
unprocessed_count = int(cursor.fetchone()[0])
cursor.execute(
"SELECT count(1) "
"FROM base_table "
"WHERE dam_in_image is 0;")
with_no_dam_count = int(cursor.fetchone()[0])
cursor.execute(
"SELECT count(1) "
"FROM base_table "
"WHERE dam_in_image is 1;")
with_dam_count = int(cursor.fetchone()[0])
cursor.close()
connection.commit()
return flask.jsonify(
{
'unprocessed_count': unprocessed_count,
'with_no_dam_count': with_no_dam_count,
'with_dam_count': with_dam_count,
        })
@APP.route('/unprocessed_image')
@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000)
def get_unprocessed_image_path():
connection = get_db_connection()
cursor = connection.cursor()
cursor.execute(
"SELECT image_path "
"FROM base_table "
"WHERE dam_in_image is NULL "
"ORDER BY RANDOM() LIMIT 1;")
return os.path.normpath(str(cursor.fetchone()[0]))
@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000)
def download_url_op(url, target_path, skip_if_target_exists=False):
"""Download `url` to `target_path`."""
try:
if skip_if_target_exists and os.path.exists(target_path):
LOGGER.info('target exists %s', target_path)
return
LOGGER.info('downloading %s to %s', url, target_path)
try:
os.makedirs(os.path.dirname(target_path))
except:
pass
with open(target_path, 'wb') as target_file:
url_stream = requests.get(url, stream=True, timeout=REQUEST_TIMEOUT)
file_size = int(url_stream.headers["Content-Length"])
LOGGER.info(
"Downloading: %s Bytes: %s" % (target_path, file_size))
downloaded_so_far = 0
block_size = 2**20
for data_buffer in url_stream.iter_content(chunk_size=block_size):
downloaded_so_far += len(data_buffer)
target_file.write(data_buffer)
status = r"%s: %10d [%3.2f%%]" % (
os.path.basename(target_path),
downloaded_so_far, downloaded_so_far * 100. / file_size)
LOGGER.info(status)
except:
        LOGGER.exception('exception occurred')
raise
def image_candidate_worker():
"""Process validation queue."""
while True:
try:
n_dams_to_fetch = IMAGE_CANDIDATE_QUEUE.get()
LOGGER.debug('%d to fetch', n_dams_to_fetch)
if n_dams_to_fetch == 'STOP':
return
for fetch_index in range(n_dams_to_fetch):
LOGGER.debug(
'working on %d of %d', fetch_index+1, n_dams_to_fetch)
lng = numpy.random.random()*360-180
lat = numpy.random.random()*180-90
                # floor the longitude down to the nearest 10 degrees
lng = int(numpy.floor(lng*0.1)*10)
if lng < 0:
lng_dir = 'W'
lng = abs(lng)
else:
lng_dir = 'E'
                # ceil the latitude up to the nearest 10 degrees
lat = int(numpy.ceil(lat*0.1)*10)
if lat < 0:
lat_dir = 'S'
lat = abs(lat)
else:
lat_dir = 'N'
src_url = (
f'http://storage.googleapis.com/global-surface-water/'
f'downloads/occurrence/occurrence_'
f'{lng}{lng_dir}_{lat}{lat_dir}.tif')
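                # Worked example (illustrative values): lng=-33.2, lat=12.7
                # floor/ceil to lng=40 with lng_dir='W' and lat=20 with
                # lat_dir='N', giving .../occurrence/occurrence_40W_20N.tif.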
LOGGER.info("download a new GSW tile: %s", src_url)
surface_water_raster_path = os.path.join(
GSW_DIR, os.path.basename(src_url))
download_url_op(
src_url, surface_water_raster_path,
skip_if_target_exists=True)
LOGGER.info('downloaded!')
if not is_a_raster(surface_water_raster_path):
LOGGER.error(
"couldn't open %s, deleting and trying again",
surface_water_raster_path)
try:
os.remove(surface_water_raster_path)
except OSError:
pass
IMAGE_CANDIDATE_QUEUE.put(1)
continue
gsw_raster = gdal.OpenEx(
surface_water_raster_path, gdal.OF_RASTER)
gsw_band = gsw_raster.GetRasterBand(1)
                # box size in Planet quad pixels (superseded by the GSW-based
                # value just below)
                box_size = int((BOUNDING_BOX_SIZE_M / PLANET_QUAD_CELL_SIZE))
                # the GSW pixel size is 0.00025 degrees; at roughly 110 km per
                # degree at the equator that is good enough for our
                # approximate bounding box.
                box_size = int((BOUNDING_BOX_SIZE_M / (.00025 * 110000)))
tries = 0
while True:
tries += 1
if tries >= MAX_GSW_TRIES:
break
                    # pick a random upper-left corner such that the sample
                    # window stays entirely inside the GSW tile
ul_x = int(numpy.random.randint(
0, gsw_band.XSize-box_size, dtype=numpy.int32))
ul_y = int(numpy.random.randint(
0, gsw_band.YSize-box_size, dtype=numpy.int32))
sample_block = gsw_band.ReadAsArray(
xoff=ul_x, yoff=ul_y, win_xsize=box_size,
win_ysize=box_size)
# search for pixels there that include edge surface water
partial_samples = numpy.argwhere(
(sample_block > MIN_SURFACE_WATER) &
(sample_block < MAX_SURFACE_WATER))
                    # accept this window if at least 20 percent of its pixels
                    # are partial surface water
if partial_samples.size > (.2*sample_block.size):
break
if tries >= MAX_GSW_TRIES:
LOGGER.error("COULDN'T FIND A BOUNDING BOX")
IMAGE_CANDIDATE_QUEUE.put(1)
continue
LOGGER.info("now pull a planet quad")
gsw_gt = pygeoprocessing.get_raster_info(
surface_water_raster_path)['geotransform']
min_x, max_y = gdal.ApplyGeoTransform(gsw_gt, ul_x, ul_y)
max_x, min_y = gdal.ApplyGeoTransform(
gsw_gt, ul_x+box_size, ul_y+box_size)
mosaic_quad_response = get_bounding_box_quads(
SESSION, MOSAIC_QUAD_LIST_URL, min_x, min_y, max_x, max_y)
mosaic_quad_response_dict = mosaic_quad_response.json()
quad_download_dict = {
'quad_download_url_list': [],
'quad_target_path_list': [],
'dam_lat_lng_bb': [min_x, min_y, max_x, max_y]
}
if not mosaic_quad_response_dict['items']:
LOGGER.error("NO PLANET COVERAGE HERE, TRYING AGAIN")
IMAGE_CANDIDATE_QUEUE.put(1)
continue
for mosaic_item in mosaic_quad_response_dict['items']:
quad_download_url = (mosaic_item['_links']['download'])
quad_download_raster_path = os.path.join(
PLANET_QUADS_DIR, active_mosaic['id'],
f'{mosaic_item["id"]}.tif')
quad_download_dict['quad_download_url_list'].append(
quad_download_url)
quad_download_dict['quad_target_path_list'].append(
quad_download_raster_path)
download_url_op(
quad_download_url, quad_download_raster_path,
skip_if_target_exists=True)
if not is_a_raster(quad_download_raster_path):
LOGGER.error(
"couldn't open %s, deleting and trying again",
quad_download_raster_path)
try:
os.remove(quad_download_raster_path)
except OSError:
pass
stitched_image_path = os.path.join(
PLANET_STITCHED_IMAGERY_DIR,
'_'.join([
os.path.basename(path).replace('.tif', '')
for path in sorted(
quad_download_dict['quad_target_path_list'])]) + '.tif')
LOGGER.info("stitched image path: %s", stitched_image_path)
stitch_rasters(
quad_download_dict['quad_target_path_list'],
stitched_image_path)
if not is_a_raster(stitched_image_path):
LOGGER.error(
"couldn't open %s, deleting and trying again",
stitched_image_path)
try:
os.remove(stitched_image_path)
except OSError:
pass
IMAGE_CANDIDATE_QUEUE.put(1)
continue
clipped_gsw_tile_path = os.path.normpath(os.path.join(
NOT_DAM_IMAGERY_DIR,
'_'.join([str(_) for _ in quad_download_dict[
'dam_lat_lng_bb']])+'.png'))
LOGGER.debug(
'clipping to %s %s', clipped_gsw_tile_path,
quad_download_dict['dam_lat_lng_bb'])
clip_raster(
stitched_image_path,
quad_download_dict['dam_lat_lng_bb'],
clipped_gsw_tile_path)
LOGGER.debug('clipped %s', clipped_gsw_tile_path)
if not is_a_raster(clipped_gsw_tile_path):
LOGGER.error(
"couldn't open %s, deleting and trying again",
clipped_gsw_tile_path)
try:
os.remove(clipped_gsw_tile_path)
except OSError:
pass
IMAGE_CANDIDATE_QUEUE.put(1)
continue
# insert into the database
LOGGER.debug('putting %s into database', clipped_gsw_tile_path)
connection = get_db_connection()
cursor = connection.cursor()
cursor.execute(
"INSERT INTO base_table (image_path, bounding_box) "
"VALUES (?, ?);", (
clipped_gsw_tile_path,
str(quad_download_dict['dam_lat_lng_bb'])))
cursor.close()
connection.commit()
LOGGER.debug('database updated, next dam!')
except:
LOGGER.exception('validation queue worker crashed.')
            global VALIDATION_WORKER_DIED
            VALIDATION_WORKER_DIED = True
IMAGE_CANDIDATE_QUEUE.put(n_dams_to_fetch-fetch_index)
@APP.after_request
def add_header(r):
"""Force no caching."""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate, public, max-age=0"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
return r
def build_image_db(target_database_path, complete_token_path):
"""Build the base database for validation.
Parameters:
        target_database_path (str): path to a target database that contains
            a table called 'base_table' with columns:
                * image_path (text, primary key)
                * bounding_box (text representation of the bounding box)
                * dam_in_image (true/false/NULL; NULL means not yet
                  classified)
        complete_token_path (str): path to a file created when the DB is
            first built. Used so taskgraph does not rebuild the DB just
            because the database file itself has changed.
Returns:
None.
"""
sql_create_projects_table = (
"""
CREATE TABLE IF NOT EXISTS base_table (
image_path TEXT NOT NULL PRIMARY KEY,
bounding_box TEXT NOT NULL,
dam_in_image BOOL
);
CREATE UNIQUE INDEX IF NOT EXISTS image_path_index
ON base_table (image_path);
""")
connection = get_db_connection()
cursor = connection.cursor()
cursor.executescript(sql_create_projects_table)
cursor.close()
connection.commit()
with open(complete_token_path, 'w') as token_file:
token_file.write(str(datetime.datetime.now()))
def get_db_connection():
"""Fetch the open database connection for this thread."""
thread_id = threading.get_ident()
if thread_id not in DB_CONN_THREAD_MAP:
DB_CONN_THREAD_MAP[thread_id] = sqlite3.connect(DATABASE_PATH)
connection = DB_CONN_THREAD_MAP[thread_id]
return connection
def stitch_rasters(base_raster_path_list, target_raster_path):
"""Merge base rasters into target."""
try:
os.makedirs(os.path.dirname(target_raster_path))
except OSError:
pass
LOGGER.debug(base_raster_path_list)
if len(base_raster_path_list) == 1:
LOGGER.debug('copying....')
shutil.copyfile(base_raster_path_list[0], target_raster_path)
else:
LOGGER.debug('running a stitch to: %s', target_raster_path)
subprocess.run([
'python', 'gdal_merge.py', '-o', target_raster_path,
*base_raster_path_list])
@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000)
def get_bounding_box_quads(
session, mosaic_quad_list_url, min_x, min_y, max_x, max_y):
"""Query for mosaic via bounding box and retry if necessary."""
try:
mosaic_quad_response = session.get(
f'{mosaic_quad_list_url}?bbox={min_x},{min_y},{max_x},{max_y}',
timeout=REQUEST_TIMEOUT)
return mosaic_quad_response
except:
LOGGER.exception(
f"get_bounding_box_quads {min_x},{min_y},{max_x},{max_y} failed")
raise
def clip_raster(
base_raster_path, lat_lng_bb, target_clipped_raster_path):
"""Clip base against `lat_lng_bb`."""
base_raster_info = pygeoprocessing.get_raster_info(base_raster_path)
wgs84_srs = osr.SpatialReference()
wgs84_srs.ImportFromEPSG(4326)
base_bounding_box = pygeoprocessing.transform_bounding_box(
lat_lng_bb, wgs84_srs.ExportToWkt(),
base_raster_info['projection'], edge_samples=11)
center_y = (base_bounding_box[1]+base_bounding_box[3])/2
center_x = (base_bounding_box[0]+base_bounding_box[2])/2
target_bounding_box = [
center_x-1000,
center_y-1000,
center_x+1000,
center_y+1000]
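    # a square clip window of BOUNDING_BOX_SIZE_M (2000) on a side, centered
    # on the transformed bounding box (assuming the raster projection is in
    # meters)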
LOGGER.debug(base_bounding_box)
LOGGER.debug(target_bounding_box)
subprocess.run([
'gdal_translate',
'-projwin',
str(target_bounding_box[0]),
str(target_bounding_box[3]),
str(target_bounding_box[2]),
str(target_bounding_box[1]),
'-of', 'PNG', base_raster_path, target_clipped_raster_path])
def is_a_raster(path):
"""Return true if path is raster."""
try:
if os.path.exists(path):
r = gdal.OpenEx(path, gdal.OF_RASTER)
if r:
return True
return False
return False
except:
return False
if __name__ == '__main__':
DB_CONN_THREAD_MAP = {}
for dir_path in [
PLANET_QUADS_DIR, NOT_DAM_IMAGERY_DIR, GSW_DIR,
PLANET_STITCHED_IMAGERY_DIR]:
try:
os.makedirs(dir_path)
except OSError:
pass
with open(PLANET_API_KEY_FILE, 'r') as planet_api_key_file:
planet_api_key = planet_api_key_file.read().rstrip()
SESSION = requests.Session()
SESSION.auth = (planet_api_key, '')
if not os.path.exists(ACTIVE_MOSAIC_JSON_PATH):
mosaics_json = SESSION.get(
'https://api.planet.com/basemaps/v1/mosaics',
timeout=REQUEST_TIMEOUT)
most_recent_date = ''
active_mosaic = None
for mosaic_data in mosaics_json.json()['mosaics']:
if mosaic_data['interval'] != '3 mons':
continue
last_acquired_date = mosaic_data['last_acquired']
LOGGER.debug(last_acquired_date)
if last_acquired_date > most_recent_date:
most_recent_date = last_acquired_date
active_mosaic = mosaic_data
with open(ACTIVE_MOSAIC_JSON_PATH, 'w') as active_mosaic_file:
active_mosaic_file.write(json.dumps(active_mosaic))
else:
with open(ACTIVE_MOSAIC_JSON_PATH, 'r') as active_mosaic_file:
active_mosaic = json.load(active_mosaic_file)
LOGGER.debug(
'using this mosaic: '
f"""{active_mosaic['last_acquired']} {active_mosaic['interval']} {
active_mosaic['grid']['resolution']}""")
MOSAIC_QUAD_LIST_URL = (
f"""https://api.planet.com/basemaps/v1/mosaics/"""
f"""{active_mosaic['id']}/quads""")
TASK_GRAPH = taskgraph.TaskGraph(
WORKSPACE_DIR, N_WORKERS, reporting_interval=REPORTING_INTERVAL)
    database_complete_token_path = os.path.join(os.path.dirname(
        DATABASE_PATH), f'{os.path.basename(DATABASE_PATH)}_COMPLETE')
build_db_task = TASK_GRAPH.add_task(
func=build_image_db,
        args=(DATABASE_PATH, database_complete_token_path),
        target_path_list=[database_complete_token_path],
ignore_path_list=[DATABASE_PATH],
task_name='build the dam database')
build_db_task.join()
IMAGE_CANDIDATE_QUEUE = queue.Queue()
IMAGE_CANDIDATE_QUEUE.put(10000)
image_candidate_thread = threading.Thread(target=image_candidate_worker)
image_candidate_thread.start()
connection = get_db_connection()
cursor = connection.cursor()
cursor.execute(
"SELECT count(1) "
"FROM base_table "
"WHERE dam_in_image is NULL;")
UNVALIDATED_IMAGE_COUNT = int(cursor.fetchone()[0])
LOGGER.debug(UNVALIDATED_IMAGE_COUNT)
cursor.close()
connection.commit()
APP.run(host='0.0.0.0', port=80)
|
scheduler.py
|
from collections import deque
import time
import random
from threading import Thread
from radio.audio import Clip, AudioClip
import radio.library
from math import sqrt
from typing import Deque
class Scheduler:
def __init__(self, library: 'radio.library.ClipLibrary', preload: bool = True):
self.library = library
self._queue: Deque[Clip] = deque()
self._force_song = False
self._other_clips: int = 0
self._last_host_time: float = 0.0
if preload:
Thread(target=self._prepare_next, daemon=True).start()
def next(self) -> Clip:
if len(self._queue) > 0:
return self._queue.popleft()
else:
now = time.localtime()
is_day = (now.tm_hour > 5 and now.tm_hour < 23) or (self.library.night.size() == 0)
night_ratio = 0.75 * sqrt((min(25, self.library.night.size())/25))
assert night_ratio >= 0.0 and night_ratio <= 1.0
day_ratio = 1.0 - night_ratio
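            # For example: with >= 25 night clips night_ratio is 0.75
            # (day_ratio 0.25); with only 4 night clips it is
            # 0.75 * sqrt(4/25) = 0.3.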
music_pool = self.library.music if (is_day or random.uniform(0, 1) < day_ratio) else self.library.night
if self._force_song:
self._other_clips = 0
self._force_song = False
return AudioClip(music_pool.next())
if self._host_time():
self._last_host_time = time.time()
self._other_clips = 0
self._force_song = True
clip = AudioClip(self.library.hosts.next())
clip.hide = True
return clip
force_music = self.library.other.size() == 0 or self._other_clips > 2
if force_music or random.uniform(0, 1) < 0.7:
return AudioClip(music_pool.next())
else:
self._other_clips += 1
return AudioClip(self.library.other.next())
def reset(self, hard: bool = False) -> None:
self._force_song = False
self._other_clips = 0
self._last_host_time = 0.0
if hard:
self._queue.clear()
def schedule(self, clip: Clip, prepend: bool = False) -> None:
assert clip is not None
if prepend:
self._queue.appendleft(clip)
else:
self._queue.append(clip)
def _host_time(self) -> bool:
if self.library.hosts.empty():
return False
t: float = (time.time() - self._last_host_time)/60
r: float = 4.0 + random.uniform(0.0, 6.0)
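        # i.e. a host clip becomes due somewhere between 4 and 10 minutes
        # after the previous one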
return t > r
def _prepare_next(self) -> None:
while True:
time.sleep(0.5)
if len(self._queue) == 0:
clip = self.next()
self._queue.append(clip)
if isinstance(clip, AudioClip):
clip.loaded.wait()
|
tilt.py
|
#!/usr/bin/python
from wiiboard import Wiiboard, EventProcessor
from pic_interface import PICInterface
from get_angle import tilt2servo
from get_ip import get_ip
import serial
import time
import threading
import pygame
pygame.mixer.init()
pygame.mixer.music.load('sound/background.mp3')
pygame.mixer.music.play(loops=-1, start=0.0)
pygame.mixer.music.set_volume(0.3)
e_sound = pygame.mixer.Sound("sound/electromagnet.wav")
e_sound_length = e_sound.get_length()
e_sound_last = 0
f_sound = pygame.mixer.Sound("sound/swing.wav")
f_sound_length = f_sound.get_length()
f_sound_last = 0
c_sound = pygame.mixer.Sound("sound/confusion.wav")
c_sound_length = c_sound.get_length()
c_sound_last = 0
coin_sound = pygame.mixer.Sound("sound/coin.wav")
coin_sound_length = coin_sound.get_length()
coin_sound_last = 0
def time_now():
return time.time()
def read_cmd(ser):
global e_sound_last, f_sound_last, c_sound_last, coin_sound_last
while ser.isOpen():
try:
now = time_now()
cmd = ser.readline()
l = cmd.split(':')
if len(l) > 0 and l[0] == 'sfx':
sfx_type = l[1]
if 'electromagnet' in sfx_type:
if (now - e_sound_last) > e_sound_length:
e_sound.play()
e_sound_last = now
elif 'flipper' in sfx_type:
if (now - f_sound_last) > f_sound_length:
f_sound.play()
f_sound_last = now
elif 'ctrl_flip' in sfx_type:
if (now - c_sound_last) > c_sound_length:
c_sound.play()
c_sound_last = now
elif 'coin' in sfx_type:
if (now - coin_sound_last) > coin_sound_length:
coin_sound.play()
coin_sound_last = now
else:
print cmd
except Exception as e:
print e
pass
time.sleep(0.04)
#
#def main_v2():
# processor = EventProcessor()
# pic = PICInterface()
# board = Wiiboard(processor, disabled = False)
# while True:
# if pic.connected:
# pic.write_ip(get_ip('wlan0'))
# if board.isConnected():
# pass
# else:
# pic.connect()
# while True:
# if pic.connected:
def main():
processor = EventProcessor()
board = Wiiboard(processor, disabled = False)
## Communicate via UART - debugging
with serial.Serial(
port = '/dev/ttyUSB0',
baudrate = 19200,
parity = serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=None) as ser:
r_t = threading.Thread(target = read_cmd, args=(ser,))
r_t.daemon = True
r_t.start()
print 'connect with pic now!'
pic = PICInterface()
pic.write_ip(get_ip('wlan0'))
print 'connect to wii board!'
board.connect()
if board.isConnected():
board.async_receive()
while True:
if pic.connected:
t_x, t_y = processor.t_x, processor.t_y # tilt angles
t_x, t_y = -t_y, t_x # this is flipped due to servo position
#s_x, s_y = tilt2servo(t_x, rad=False), tilt2servo(t_y, rad=False) # servo angles
                s_x, s_y = 6 * t_x, 6 * t_y # scale tilt to servo angle (~6x): 15 deg tilt -> 90 deg servo
wii = board.isConnected()
if not (pic.write_x(s_x) and pic.write_y(s_y) and pic.write_wii(wii)):
pic.connected = False
if not wii: # try to connect to wii again
board.connect() # try connecting again
if board.isConnected():
board.setLight(False)
board.wait(500)
board.setLight(True)
board.async_receive()
else:
print 'pic not connected'
pic.connect()
pic.write_ip(get_ip('wlan0'))
time.sleep(0.05)
pic.close()
if __name__ == "__main__":
main()
|
all.py
|
#!/usr/bin/env python3
# Principles used:
# Applies all of the strategies below to the shares and shows a table containing every ranking.
# The resulting table is sorted by the sum of the rankings obtained after applying these
# 4 different strategies.
# Graham:
# - [x] 1. Survival: the company survived the last 10 years. https://www.estrategista.net/o-fracasso-de-benjamin-graham-na-bolsa-atual/
# - [x] 2. Earnings stability: profit > 0 in every one of the last 10 years. https://www.estrategista.net/o-fracasso-de-benjamin-graham-na-bolsa-atual/
# - [x] 3. Earnings growth: growing profits over the last 10 years https://www.estrategista.net/o-fracasso-de-benjamin-graham-na-bolsa-atual/
# - [x] 4. Earnings-per-share growth: current LPA > 1.33 * LPA of 10 years ago. (Computed from the average of the first 3 and the last 3 years of that period) http://seuguiadeinvestimentos.com.br/a-tecnica-de-investimento-de-benjamin-graham-ii/
# - [x] 5. Dividend stability: dividends paid in every one of the last 10 years. http://seuguiadeinvestimentos.com.br/a-tecnica-de-investimento-de-benjamin-graham-ii/
# - [x] 6. square_root_of(22.5 * VPA * LPA) => The higher, the better. Ideally > 1.5 * price (worked example below). https://www.sunoresearch.com.br/artigos/valor-intrinseco/?utm_source=PR&utm_medium=artigo&utm_campaign=investing_05122019
# - [x] 7. P/L (price/earnings) => The lower, the better (ideally < 15 AND >= 0) http://seuguiadeinvestimentos.com.br/a-tecnica-de-investimento-de-benjamin-graham-ii/
# - [x] 8. P/VP (price/book value) => The lower, the better (ideally < 1.5 AND >= 0) http://seuguiadeinvestimentos.com.br/a-tecnica-de-investimento-de-benjamin-graham-ii/
# - [x] 9. Growth over 5 years => The higher, the better (ideally > 5%) https://daxinvestimentos.com/analise-fundamentalista-mais-de-200-de-rentabilidade-em-2-anos/
# - [x] 10. ROE (Return On Equity) => The higher, the better (ideally above 20%) https://daxinvestimentos.com/analise-fundamentalista-mais-de-200-de-rentabilidade-em-2-anos/
# - [x] 11. Dividend Yield => The higher, the better (ideally > the Selic rate (4.5%)) https://foconomilhao.com/acoes-com-dividend-yield-maior-que-a-selic/
# - [x] 12. Liquidez Corrente (current ratio) => The higher, the better (ideally > 1.5) https://daxinvestimentos.com/analise-fundamentalista-mais-de-200-de-rentabilidade-em-2-anos/
# - [x] 13. Dívida Bruta/Patrimônio (gross debt/equity) => The lower, the better (ideally < 50%) https://daxinvestimentos.com/analise-fundamentalista-mais-de-200-de-rentabilidade-em-2-anos/
# - [x] 14. Patrimônio Líquido (net equity) => The higher, the better (ideally > 2000000000)
# Bazin:
# - [x] 1. Preço Justo (Bazin) > 1.5 * price. Preço Justo (Bazin) => Dividend Yield * 16.67 (by: Décio Bazin)
# - [x] 2. Dívida Bruta/Patrimônio < 0.5 (50%)
# - [x] 3. Dividend Yield > 0.06 (6%)
# - [x] 4. Average Dividend Yield over the last 5 years > 0.05 (5%)
# - [x] 5. Constant dividend payments over the last 5 years
# - [x] 6. Growing dividend payments over the last 5 years
# - [x] 7. 0 < Payout < 1
# Greenblatt:
# - [x] Magic Formula 1): higher ROIC and lower EV/EBIT and higher ROE and lower P/L
# Piotroski:
# - [x] 1. ROA > 0 (current year)
# - [x] 2. FCO (operating cash flow) > 0 (current year)
# - [x] 3. FCO > net income (current year)
# - [x] 4. Current ROA > previous year's ROA
# - [x] 5. Current leverage < last year's (net debt / equity)
# - [x] 6. Current Liquidez Corrente > previous year's Liquidez Corrente
# - [x] 7. Current number of shares = previous year's number of shares
# - [x] 8. Current gross margin > previous year's gross margin
# - [x] 9. Current asset turnover > previous year's asset turnover
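# Worked example of Graham item 6 above (hypothetical numbers): with VPA = 10.0
# and LPA = 2.0, sqrt(22.5 * 10.0 * 2.0) ≈ 21.21; a share quoted at 12.00 would
# have Preço Justo (Graham) / Cotação ≈ 1.77, which clears the 1.5 margin of
# safety checked in fill_score below.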
import sys, os
sys.path.extend([f'../{name}' for name in os.listdir("..") if os.path.isdir(f'../{name}')])
import fundamentus
import bovespa
import backtest
import browser
import pandas
import numpy
import re
from math import sqrt
from decimal import Decimal
import http.cookiejar
import urllib.request
import json
import threading
import time
import pyperclip
# Populate the shares pandas DataFrame for the provided year
def populate_shares(year):
globals()['year'] = year
globals()['infos'] = {}
if year == current_year():
shares = bovespa.shares()
else:
shares = fundamentus.shares(year)
shares = shares[shares['Cotação'] > 0]
shares = shares[shares['Liquidez 2 meses'] > 0]
shares['Ranking (Graham)'] = 0
shares['Ranking (Bazin)'] = 0
shares['Ranking (Greenblatt)'] = 0
shares['Ranking (Piotroski)'] = 0
shares['Ranking (Sum)'] = 0
shares['Ranking (Final)'] = 0
return setup(shares, year)
def setup(shares, year):
fill_infos(shares)
shares = add_ratings(shares)
return reorder_columns(shares)
# infos = {
#   'TRPL4': {
#     "survivability": True/False, # Company with at least 10 years of survival (Graham looked at the last 10 years of earnings and dividends) # Benjamin Graham. https://www.estrategista.net/o-fracasso-de-benjamin-graham-na-bolsa-atual/
#     "earnings_stability": True/False, # Earnings stability: profit > 0 in each of the last 10 years # Benjamin Graham. https://www.estrategista.net/o-fracasso-de-benjamin-graham-na-bolsa-atual/
#     "earnings_growth": True/False, # Earnings growth: growing profits over the last 10 years # Benjamin Graham. https://www.estrategista.net/o-fracasso-de-benjamin-graham-na-bolsa-atual/
#     "lpa_growth": True/False, # Current LPA > 1.33 * LPA of 10 years ago. # Benjamin Graham. (computed from the average of the first 3 and the last 3 years of that period) # http://seuguiadeinvestimentos.com.br/a-tecnica-de-investimento-de-benjamin-graham-ii/
#     "dividends_stability": True/False, # Dividends paid in each of the last 10 years. # Benjamin Graham # http://seuguiadeinvestimentos.com.br/a-tecnica-de-investimento-de-benjamin-graham-ii/
#   }
# }
def fill_infos(shares):
cookie_jar = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie_jar))
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201'),
('Accept', 'text/html, text/plain, text/css, text/sgml, */*;q=0.01')]
tickers = list(shares.index)
threads = [threading.Thread(target=fill_infos_by_ticker, args=(ticker,opener,)) for ticker in tickers]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def fill_infos_by_ticker(ticker, opener):
infos[ticker] = {
# Graham
'survivability': False,
'earnings_stability': False,
'earnings_growth': False,
'lpa_growth': False,
'dividends_stability': False,
# Bazin
'ultimos_dy': 0.0,
'constante': False,
'crescente': False,
'healthy_payout': False,
# Piotroski
'roa_positivo': False,
'fco_positivo': False,
'fco_saudavel': False,
'roa_crescente': False,
'alavancagem_decrescente': False,
'liquidez_crescente': False,
'no_acoes_constante': False,
'margem_bruta_crescente': False,
'giro_ativo_crescente': False
}
    # Fetching the net income (Lucro Líquido) history
url = f'https://api-analitica.sunoresearch.com.br/api/Statement/GetStatementResultsReportByTicker?type=y&ticker={ticker}&period=999'
with opener.open(url) as link:
company_results = link.read().decode('ISO-8859-1')
company_results = json.loads(company_results)
current_year = year
lucros = [r for r in company_results if r['description'] == 'Lucro LÃ\xadquido'][0]
    years = [x for x in lucros.keys() if re.match(r'C_\w{4}$', x)]
if(len(years) == 0):
return
    years = [x for x in years if int(re.findall(r'C_(\w{4})$', x)[0]) < current_year]
list.sort(years)
lucros = { year: lucros[year] for year in years }
ultimos_lucros = list(lucros.values())[-10:]
    # Ugly fix for missing data :( the API appears to have gaps -_-
    # Fill None values with the mean earning
present_lucros = [i for i in ultimos_lucros if i]
if (len(present_lucros) == 0):
mean = 0
else:
mean = sum(present_lucros) / len(present_lucros)
ultimos_lucros = [mean if v is None else v for v in ultimos_lucros]
# End of Ugly Fix
infos[ticker]['survivability'] = f'C_{current_year - 10}' in lucros.keys()
infos[ticker]['earnings_stability'] = all(ultimos_lucros[i] > 0 for i in range(len(ultimos_lucros)))
    infos[ticker]['earnings_growth'] = all(ultimos_lucros[i] <= ultimos_lucros[i+1] for i in range(len(ultimos_lucros)-1)) # This should become its own function and we should look at the trend of the series!
# Fetching Previous Years Indicators
yearly_indicators_url = f'https://api-analitica.sunoresearch.com.br/api/Indicator/GetIndicatorsYear?ticker={ticker}'
with opener.open(yearly_indicators_url) as link:
yearly_indicators = link.read().decode('ISO-8859-1')
yearly_indicators = json.loads(yearly_indicators)
# Only consider company indicators before the current_year (robust solution for backtesting purposes)
yearly_filtered_indicators = [ci for ci in yearly_indicators if ci['year'] < current_year]
last_dpas = [fundament['dpa'] for fundament in yearly_filtered_indicators] # Graham
last_lpas = [fundament['lpa'] for fundament in yearly_filtered_indicators] # Graham
last_payouts = [fundament['payout'] for fundament in yearly_filtered_indicators] # Bazin
last_divYields = [fundament['divYeld'] for fundament in yearly_filtered_indicators] # Bazin
# Graham
if (len(last_lpas[:10]) > 0):
infos[ticker]['lpa_growth'] = (sum(last_lpas[:3]) / 3) >= (sum(last_lpas[-3:]) / 3)
if (len(last_dpas[:10]) > 0):
infos[ticker]['dividends_stability'] = all(last_dpas[:10][i] > 0 for i in range(len(last_dpas[:10])))
# Bazin
if (len(last_divYields[:5]) > 0):
infos[ticker]['ultimos_dy'] = (sum(last_divYields[:5]) / len(last_divYields[:5]))
if (len(last_dpas[:5]) > 0):
infos[ticker]['constante'] = all(last_dpas[:5][i] > 0 for i in range(len(last_dpas[:5])))
infos[ticker]['crescente'] = all(last_dpas[:5][i] >= last_dpas[:5][i+1] for i in range(len(last_dpas[:5])-1))
if (len(last_divYields[:5]) > 0):
infos[ticker]['healthy_payout'] = all((last_payouts[:5][i] > 0) & (last_payouts[:5][i] < 1) for i in range(len(last_payouts[:5])))
# Fetching Current Year Indicators
current_indicators_url = f'https://api-analitica.sunoresearch.com.br/api/Indicator/GetIndicatorsDashboard?ticker={ticker}'
with opener.open(current_indicators_url) as link:
company_indicators = link.read().decode('ISO-8859-1')
company_indicators = json.loads(company_indicators)
company_indicators.extend(yearly_indicators)
# Only consider company indicators before OR EQUAL to the current_year (robust solution for backtesting purposes)
company_filtered_indicators = [ci for ci in company_indicators if ci['year'] <= current_year]
# Piotroski
if (len(company_filtered_indicators) > 0):
infos[ticker]['roa_positivo'] = company_filtered_indicators[0]['roa'] > 0
infos[ticker]['fco_positivo'] = company_filtered_indicators[0]['fco'] > 0
infos[ticker]['fco_saudavel'] = company_filtered_indicators[0]['fco'] > company_filtered_indicators[0]['lucroLiquido']
if (len(company_filtered_indicators) > 1):
infos[ticker]['roa_crescente'] = company_filtered_indicators[0]['roa'] > company_filtered_indicators[1]['roa']
infos[ticker]['alavancagem_decrescente'] = company_filtered_indicators[0]['dlpl'] < company_filtered_indicators[1]['dlpl']
infos[ticker]['liquidez_crescente'] = company_filtered_indicators[0]['liqCorrent'] > company_filtered_indicators[1]['liqCorrent']
infos[ticker]['no_acoes_constante'] = company_filtered_indicators[0]['qntAcoes'] == company_filtered_indicators[1]['qntAcoes']
infos[ticker]['margem_bruta_crescente'] = company_filtered_indicators[0]['margBruta'] > company_filtered_indicators[1]['margBruta']
infos[ticker]['giro_ativo_crescente'] = company_filtered_indicators[0]['giroAtivos'] > company_filtered_indicators[1]['giroAtivos']
def add_ratings(shares):
init(shares)
remove_bad_shares(shares)
calculate_greenblatt(shares)
shares = fill_special_infos(shares)
add_bazin_valuation(shares)
fill_fair_price(shares)
fill_score(shares)
fill_score_explanation(shares)
return shares
# Initialize the indicator columns
def init(shares):
# Graham
shares['Preço Justo (Graham)'] = 0
shares['Graham Score'] = Decimal(0)
shares['Preço Justo (Graham) / Cotação'] = 0
shares['10 Anos de Sobrevivencia'] = False
shares['Lucros Positivos nos Ultimos 10 Anos'] = False
shares['Lucros Crescentes nos Ultimos 10 Anos'] = False
shares['LPA atual > 1.33 * LPA 10 anos atrás'] = False
shares['Dividendos Positivos nos Ultimos 10 Anos'] = False
# Bazin
shares['Bazin Score'] = Decimal(0)
shares['Dividendos > 5% na média dos últimos 5 anos'] = False
shares['Dividendos Constantes Ultimos 5 Anos'] = False
shares['Dividendos Crescentes Ultimos 5 Anos'] = False
shares['Payout Saudavel nos Ultimos 5 Anos'] = False
shares['Media de Dividend Yield dos Últimos 5 anos'] = Decimal(0)
# Greenblatt
shares['ROE placement'] = 0
shares['P/L placement'] = 0
shares['ROIC placement'] = 0
shares['EV/EBIT placement'] = 0
shares['Magic Formula'] = 0
# Piotroski
shares['Piotroski Score'] = 0
shares['ROA positivo'] = False
shares['FCO positivo'] = False
shares['FCO > Lucro Líquido'] = False
shares['ROA crescente'] = False
shares['Alavancagem decrescente'] = False
shares['Liquidez Corrente crescente'] = False
shares['No Ações constante'] = False
shares['Margem Bruta crescente'] = False
shares['Giro Ativo crescente'] = False
def remove_bad_shares(shares):
shares.drop(shares[shares['P/L'] <= 0].index, inplace=True)
shares.drop(shares[shares['ROE'] <= 0].index, inplace=True)
shares.drop(shares[shares['EV/EBIT'] <= 0].index, inplace=True)
shares.drop(shares[shares['ROIC'] <= 0].index, inplace=True)
def calculate_greenblatt(shares):
shares.sort_values(by='ROE', ascending=False, inplace=True)
shares['ROE placement'] = range(0, len(shares))
shares.sort_values(by='P/L', ascending=True, inplace=True)
shares['P/L placement'] = range(0, len(shares))
shares['Magic Formula'] += shares['ROE placement'] + shares['P/L placement']
shares.sort_values(by='ROIC', ascending=False, inplace=True)
shares['ROIC placement'] = range(0, len(shares))
shares.sort_values(by='EV/EBIT', ascending=True, inplace=True)
shares['EV/EBIT placement'] = range(0, len(shares))
shares['Magic Formula'] += shares['ROIC placement'] + shares['EV/EBIT placement']
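# Illustrative example: a share with placements 3 (ROE), 10 (P/L), 1 (ROIC) and
# 7 (EV/EBIT) gets Magic Formula = 3 + 10 + 1 + 7 = 21; lower totals rank
# better, which is why __main__ sorts 'Magic Formula' ascending.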
def fill_special_infos(shares):
for index in range(len(shares)):
ticker = shares.index[index]
# Graham
shares['Graham Score'][index] += int(infos[ticker]['survivability'])
shares['10 Anos de Sobrevivencia'][index] = infos[ticker]['survivability']
shares['Graham Score'][index] += int(infos[ticker]['earnings_stability'])
shares['Lucros Positivos nos Ultimos 10 Anos'][index] = infos[ticker]['earnings_stability']
shares['Graham Score'][index] += int(infos[ticker]['earnings_growth'])
shares['Lucros Crescentes nos Ultimos 10 Anos'][index] = infos[ticker]['earnings_growth']
shares['Graham Score'][index] += int(infos[ticker]['lpa_growth'])
shares['LPA atual > 1.33 * LPA 10 anos atrás'][index] = infos[ticker]['lpa_growth']
shares['Graham Score'][index] += int(infos[ticker]['dividends_stability'])
shares['Dividendos Positivos nos Ultimos 10 Anos'][index] = infos[ticker]['dividends_stability']
# Bazin
shares['Media de Dividend Yield dos Últimos 5 anos'][index] = Decimal(infos[ticker]['ultimos_dy'])
shares['Bazin Score'][index] += int(infos[ticker]['ultimos_dy'] > 0.05)
shares['Dividendos > 5% na média dos últimos 5 anos'][index] = infos[ticker]['ultimos_dy'] > 0.05
shares['Bazin Score'][index] += int(infos[ticker]['constante'])
shares['Dividendos Constantes Ultimos 5 Anos'][index] = infos[ticker]['constante']
shares['Bazin Score'][index] += int(infos[ticker]['crescente'])
shares['Dividendos Crescentes Ultimos 5 Anos'][index] = infos[ticker]['crescente']
shares['Bazin Score'][index] += int(infos[ticker]['healthy_payout'])
shares['Payout Saudavel nos Ultimos 5 Anos'][index] = infos[ticker]['healthy_payout']
# Piotroski
shares['Piotroski Score'][index] += int(infos[ticker]['roa_positivo'])
shares['ROA positivo'][index] = infos[ticker]['roa_positivo']
shares['Piotroski Score'][index] += int(infos[ticker]['fco_positivo'])
shares['FCO positivo'][index] = infos[ticker]['fco_positivo']
shares['Piotroski Score'][index] += int(infos[ticker]['fco_saudavel'])
shares['FCO > Lucro Líquido'][index] = infos[ticker]['fco_saudavel']
shares['Piotroski Score'][index] += int(infos[ticker]['roa_crescente'])
shares['ROA crescente'][index] = infos[ticker]['roa_crescente']
shares['Piotroski Score'][index] += int(infos[ticker]['alavancagem_decrescente'])
shares['Alavancagem decrescente'][index] = infos[ticker]['alavancagem_decrescente']
shares['Piotroski Score'][index] += int(infos[ticker]['liquidez_crescente'])
shares['Liquidez Corrente crescente'][index] = infos[ticker]['liquidez_crescente']
shares['Piotroski Score'][index] += int(infos[ticker]['no_acoes_constante'])
shares['No Ações constante'][index] = infos[ticker]['no_acoes_constante']
shares['Piotroski Score'][index] += int(infos[ticker]['margem_bruta_crescente'])
shares['Margem Bruta crescente'][index] = infos[ticker]['margem_bruta_crescente']
shares['Piotroski Score'][index] += int(infos[ticker]['giro_ativo_crescente'])
shares['Giro Ativo crescente'][index] = infos[ticker]['giro_ativo_crescente']
return shares
def add_bazin_valuation(shares):
shares['Preço Justo (Bazin)'] = shares['Media de Dividend Yield dos Últimos 5 anos'] * 100 * Decimal(16.67)
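    # Worked example (hypothetical): a 5-year average dividend yield of 0.06
    # gives 0.06 * 100 * 16.67 ≈ 100.0, which fill_score later compares
    # against 1.5 * Cotação.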
shares['Preço Justo (Bazin) / Cotação'] = shares['Preço Justo (Bazin)'] / shares['Cotação']
# Benjamin Graham devised the following formula to estimate intrinsic value (Preço Justo (Graham)):
# => sqrt(22.5 * VPA * LPA)
def fill_fair_price(shares):
for index in range(len(shares)):
if ((shares['P/L'][index] > 0) & (shares['P/VP'][index] > 0)):
shares['Preço Justo (Graham)'][index] = sqrt(Decimal(22.5) * (shares['Cotação'][index] / shares['P/L'][index]) * (shares['Cotação'][index] / shares['P/VP'][index]))
else:
shares['Preço Justo (Graham)'][index] = 0
    shares['Preço Justo (Graham) / Cotação'] = shares['Preço Justo (Graham)'] / shares['Cotação'] # Ideally > 1. The higher, the better! It means the share should be worth 1 time more, 2 times more, 3 times more, etc.
def fill_score(shares):
shares['Graham Score'] += (shares['Preço Justo (Graham) / Cotação'] > Decimal(1.5)).astype(int)
shares['Graham Score'] += ((shares['P/L'] < 15) & (shares['P/L'] >= 0)).astype(int)
shares['Graham Score'] += ((shares['P/VP'] < 1.5) & (shares['P/VP'] >= 0)).astype(int)
shares['Graham Score'] += (shares['Crescimento em 5 anos'] > 0.05).astype(int)
shares['Graham Score'] += (shares['ROE'] > 0.2).astype(int)
shares['Graham Score'] += (shares['Dividend Yield'] > 0.045).astype(int)
shares['Graham Score'] += (shares['Liquidez Corrente'] > 1.5).astype(int)
shares['Graham Score'] += (shares['Dívida Bruta/Patrimônio'] < 0.5).astype(int)
shares['Graham Score'] += (shares['Patrimônio Líquido'] > 2000000000).astype(int)
shares['Bazin Score'] += (shares['Preço Justo (Bazin)'] > Decimal(1.5) * shares['Cotação']).astype(int)
shares['Bazin Score'] += (shares['Dividend Yield'] > 0.06).astype(int)
shares['Bazin Score'] += ((shares['Dívida Bruta/Patrimônio']).astype(float) < 0.5).astype(int)
# Shows which filters each share passed to earn its score
def fill_score_explanation(shares):
shares['Margem de Segurança: Preço Justo (Graham) > 1.5 * Cotação'] = shares['Preço Justo (Graham) / Cotação'] > Decimal(1.5)
shares['P/L < 15 (E não negativo)'] = (shares['P/L'] < 15) & (shares['P/L'] >= 0)
shares['P/VP < 1.5 (E não negativo)'] = (shares['P/VP'] < 1.5) & (shares['P/VP'] >= 0)
shares['Crescimento em 5 anos > 0.05'] = shares['Crescimento em 5 anos'] > 0.05
shares['ROE > 20%'] = shares['ROE'] > 0.2
shares['Dividend Yield > 0.045 (Taxa Selic)'] = shares['Dividend Yield'] > 0.045
shares['Liquidez Corrente > 1.5'] = shares['Liquidez Corrente'] > 1.5
shares['Dívida Bruta/Patrimônio < 0.5'] = shares['Dívida Bruta/Patrimônio'] < 0.5
shares['Patrimônio Líquido > 2 Bilhões'] = shares['Patrimônio Líquido'] > 2000000000
shares['Preço Justo (Bazin) > 1.5 * Cotação'] = shares['Preço Justo (Bazin)'] > Decimal(1.5) * shares['Cotação']
shares['Dividend Yield > 0.06'] = shares['Dividend Yield'] > 0.06
shares['Dívida Bruta/Patrimônio < 0.5'] = (shares['Dívida Bruta/Patrimônio']).astype(float) < 0.5 # https://www.investimentonabolsa.com/2015/07/saiba-analisar-divida-das-empresas.html https://www.sunoresearch.com.br/artigos/5-indicadores-para-avaliar-solidez-de-uma-empresa/
# Reorder the table so that Cotação, the intrinsic values and the scores appear as the first columns
def reorder_columns(shares):
columns = ['Ranking (Final)', 'Cotação', 'Setor', 'Subsetor', 'Segmento', 'Ranking (Graham)', 'Ranking (Bazin)', 'Ranking (Greenblatt)', 'Ranking (Piotroski)', 'Ranking (Sum)', 'Preço Justo (Graham)', 'Preço Justo (Bazin)', 'Graham Score', 'Bazin Score', 'Piotroski Score', 'Preço Justo (Graham) / Cotação', 'Preço Justo (Bazin) / Cotação', 'Media de Dividend Yield dos Últimos 5 anos', 'Dividend Yield']
return shares[columns + [col for col in shares.columns if col not in tuple(columns)]]
# Get the current_year integer value, for example: 2020
def current_year():
return int(time.strftime("%Y"))
# python3 all.py "{ 'year': 2015 }"
if __name__ == '__main__':
year = current_year()
if len(sys.argv) > 1:
year = int(eval(sys.argv[1])['year'])
shares = populate_shares(year)
shares.sort_values(by=['Graham Score', 'Preço Justo (Graham) / Cotação'], ascending=[False, False], inplace=True)
shares['Ranking (Graham)'] = range(1, len(shares) + 1)
shares.sort_values(by=['Bazin Score', 'Preço Justo (Bazin) / Cotação'], ascending=[False, False], inplace=True)
shares['Ranking (Bazin)'] = range(1, len(shares) + 1)
shares.sort_values(by=['Magic Formula', 'Cotação'], ascending=[True, True], inplace=True)
shares['Ranking (Greenblatt)'] = range(1, len(shares) + 1)
shares.sort_values(by=['Piotroski Score', 'Cotação'], ascending=[False, True], inplace=True)
shares['Ranking (Piotroski)'] = range(1, len(shares) + 1)
shares['Ranking (Sum)'] = shares['Ranking (Graham)'] + shares['Ranking (Bazin)'] + shares['Ranking (Greenblatt)'] + shares['Ranking (Piotroski)']
shares.sort_values(by=['Ranking (Sum)', 'Preço Justo (Graham) / Cotação'], ascending=[True, False], inplace=True)
shares['Ranking (Final)'] = range(1, len(shares) + 1)
print(shares)
pyperclip.copy(shares.to_markdown())
if year != current_year():
backtest.run_all(fundamentus.start_date(year), list(shares.index[:20]))
|
app.py
|
## See readme
from flask import Flask
from threading import Thread
import time, json, pickle, os
from datetime import datetime
import praw
app = Flask(__name__)
## RedditParser
tracked = []
class RedditParser:
reddit = praw.Reddit(client_id=os.environ['REDDIT_CLIENT_ID'],
client_secret=os.environ['REDDIT_CLIENT_SECRET'],
password=os.environ['REDDIT_PASSWORD'],
username=os.environ['REDDIT_USERNAME'],
user_agent = 'python')
keys = ['id', 'score', 'upvote_ratio', 'num_comments', 'title', 'is_self', 'selftext','created_utc']
@classmethod
    def get_data(cls, subreddit, mode='top', limit=1000):
        data = []  # pd.DataFrame([], columns=keys)
        if mode == 'top':
            fun = cls.reddit.subreddit(subreddit).top
        else:
            fun = cls.reddit.subreddit(subreddit).new
        # fetch up to `limit` submissions from the chosen listing
        for identifier in fun(limit=limit):
            print(identifier)
            data.append(cls.get_submission(identifier))
        pickle.dump(data, open(f'{subreddit}{mode}1000.p', 'wb'))
        return data
@classmethod
    def get_submission(cls, identifier):
        s = cls.reddit.submission(id=identifier)
        dex = {k: getattr(s, k) for k in cls.keys}
        dex['retrieval_timestamp'] = str(datetime.now())
        dex['N_comments'] = len(s.comments.list())
        return dex
## views ############
@app.route('/track_submission=<identifier>')
def track_submission(identifier):
if identifier in tracked:
return {'status': 'already running'}
else:
tracked.append(identifier)
Thread(target=tracker, args=[identifier]).start()
app.logger.info(f'tracking submission {identifier}')
return {'status': 'success', 'time': str(datetime.now()), 'submission': identifier}
@app.route('/output_submission=<identifier>')
def output_submission(identifier):
filename = f'{identifier}.json'
if os.path.exists(filename):
return {'status': 'success', 'data': json.load(open(filename,'r'))}
else:
return {'status': 'file not found'}
@app.route('/status')
def status():
if tracked:
return {'status': 'running', 'tracking': tracked}
else:
return {'status': 'idle'}
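# Example interaction once the app is running (hypothetical submission id,
# default Flask host/port assumed):
#   GET /track_submission=abc123  -> {"status": "success", ...} and a tracker thread starts
#   GET /status                   -> {"status": "running", "tracking": ["abc123"]}
#   GET /output_submission=abc123 -> {"status": "success", "data": [...]} once a snapshot exists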
## tasks #############
def tracker(identifier):
data = []
start = datetime.now()
while True:
data.append(RedditParser.get_submission(identifier))
json.dump(data,open(f'{identifier}.json','w'))
time.sleep(60*5)
if (datetime.now() - start).days > 1:
tracked.remove(identifier)
return 0
########## RUN MAIN ##############
if __name__ == '__main__':
print('/status')
print('/track_submission=<identifier> starts tracking')
print('/output_submission=<identifier> outputs data')
app.run(host= '0.0.0.0')
|
test_dp_with_orc8r.py
|
"""
Copyright 2022 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
import operator
from contextlib import contextmanager
from datetime import datetime, timezone
from http import HTTPStatus
from threading import Event, Thread
from time import sleep
from typing import Any, Dict, List, Optional
from uuid import uuid4
import pytest
import requests
from dp.protos.enodebd_dp_pb2 import CBSDRequest, CBSDStateResult, LteChannel
from magma.test_runner.config import TestConfig
from magma.test_runner.tests.integration_testcase import (
DomainProxyIntegrationTestCase,
)
from retrying import retry
config = TestConfig()
DP_HTTP_PREFIX = 'magma/v1/dp'
NETWORK = 'some_network'
SOME_FCC_ID = "some_fcc_id"
OTHER_FCC_ID = "other_fcc_id"
USER_ID = "some_user_id"
SAS = 'SAS'
DP = 'DP'
DATETIME_WAY_BACK = '2010-03-28T09:13:25.407877399+00:00'
@pytest.mark.orc8r
class DomainProxyOrc8rTestCase(DomainProxyIntegrationTestCase):
def setUp(self) -> None:
self.serial_number = self._testMethodName + '_' + str(uuid4())
def test_cbsd_sas_flow(self):
builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
cbsd_id = self.given_cbsd_provisioned(builder)
with self.while_cbsd_is_active():
self.then_logs_are(
get_current_sas_filters(self.serial_number),
self.get_sas_provision_messages(),
)
filters = get_filters_for_request_type('heartbeat', self.serial_number)
self.then_message_is_eventually_sent(filters)
self.delete_cbsd(cbsd_id)
def test_cbsd_unregistered_when_requested_by_desired_state(self):
builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
cbsd_id = self.given_cbsd_provisioned(builder)
with self.while_cbsd_is_active():
filters = get_filters_for_request_type('deregistration', self.serial_number)
builder = builder.with_desired_state('unregistered')
self.when_cbsd_is_updated(cbsd_id, builder.build_post_data())
# TODO maybe asking for state (cbsd api instead of log api) would be better
self.then_message_is_eventually_sent(filters)
def test_sas_flow_restarted_when_user_requested_deregistration(self):
builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
cbsd_id = self.given_cbsd_provisioned(builder)
with self.while_cbsd_is_active():
filters = get_filters_for_request_type('deregistration', self.serial_number)
self.when_cbsd_is_deregistered(cbsd_id)
self.then_message_is_eventually_sent(filters)
self.then_state_is_eventually(builder.build_grant_state_data())
def test_sas_flow_restarted_for_updated_cbsd(self):
builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
cbsd_id = self.given_cbsd_provisioned(builder)
with self.while_cbsd_is_active():
builder = builder.with_fcc_id(OTHER_FCC_ID)
self.when_cbsd_is_updated(cbsd_id, builder.build_post_data())
filters = get_filters_for_request_type('deregistration', self.serial_number)
self.then_message_is_eventually_sent(filters)
self.then_state_is_eventually(builder.build_grant_state_data())
cbsd = self.when_cbsd_is_fetched(builder.serial_number)
self.then_cbsd_is(cbsd, builder.build_registered_active_data())
self.delete_cbsd(cbsd_id)
def test_activity_status(self):
builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
cbsd_id = self.given_cbsd_provisioned(builder)
cbsd = self.when_cbsd_is_fetched(builder.serial_number)
self.then_cbsd_is(cbsd, builder.build_registered_active_data())
self.when_cbsd_is_inactive()
cbsd = self.when_cbsd_is_fetched(builder.serial_number)
self.then_cbsd_is(cbsd, builder.build_registered_inactive_data())
def test_frequency_preferences(self):
builder = CbsdAPIDataBuilder(). \
with_serial_number(self.serial_number). \
with_frequency_preferences(5, [3625]). \
with_expected_grant(5, 3625, 31)
cbsd_id = self.given_cbsd_provisioned(builder)
self.delete_cbsd(cbsd_id)
def test_creating_cbsd_with_the_same_unique_fields_returns_409(self):
builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
self.when_cbsd_is_created(builder.build_post_data())
self.when_cbsd_is_created(builder.build_post_data(), expected_status=HTTPStatus.CONFLICT)
def test_create_cbsd_with_single_step_fields(self):
# TODO extend the test to check if the registration actually works
builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
self.when_cbsd_is_created(builder.build_unregistered_single_step_data())
cbsd = self.when_cbsd_is_fetched(builder.serial_number)
self.then_cbsd_is(cbsd, builder.build_unregistered_single_step_data())
def test_updating_cbsd_returns_409_when_setting_existing_serial_num(self):
builder = CbsdAPIDataBuilder()
cbsd1_serial = self.serial_number + "_foo"
cbsd2_serial = self.serial_number + "_bar"
cbsd1_payload = builder.with_serial_number(cbsd1_serial).build_post_data()
cbsd2_payload = builder.with_serial_number(cbsd2_serial).build_post_data()
self.when_cbsd_is_created(cbsd1_payload)
self.when_cbsd_is_created(cbsd2_payload)
cbsd2 = self.when_cbsd_is_fetched(serial_number=cbsd2_serial)
self.when_cbsd_is_updated(
cbsd_id=cbsd2.get("id"),
data=cbsd1_payload,
expected_status=HTTPStatus.CONFLICT,
)
def test_fetch_cbsds_filtered_by_serial_number(self):
cbsd1_serial = self.serial_number + "_foo"
cbsd2_serial = self.serial_number + "_bar"
builder1 = CbsdAPIDataBuilder().with_serial_number(cbsd1_serial)
builder2 = CbsdAPIDataBuilder().with_serial_number(cbsd2_serial)
self.when_cbsd_is_created(builder1.build_post_data())
self.when_cbsd_is_created(builder2.build_post_data())
cbsd1 = self.when_cbsd_is_fetched(serial_number=cbsd1_serial)
cbsd2 = self.when_cbsd_is_fetched(serial_number=cbsd2_serial)
self.then_cbsd_is(cbsd1, builder1.build_unregistered_data())
self.then_cbsd_is(cbsd2, builder2.build_unregistered_data())
def test_fetching_logs_with_custom_filters(self):
builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
sas_to_dp_end_date_only = {
'serial_number': self.serial_number,
'from': SAS,
'to': DP,
'end': now(),
}
sas_to_dp_begin_date_only = {
'serial_number': self.serial_number,
'from': SAS,
'to': DP,
'begin': DATETIME_WAY_BACK,
}
sas_to_dp_end_date_too_early = {
'serial_number': self.serial_number,
'from': SAS,
'to': DP,
'end': DATETIME_WAY_BACK,
}
dp_to_sas = {
'serial_number': self.serial_number,
'from': DP,
'to': SAS,
}
dp_to_sas_incorrect_serial_number = {
'serial_number': 'incorrect_serial_number',
'from': DP,
'to': SAS,
}
sas_to_dp_with_limit = {
'limit': '100',
'from': SAS,
'to': DP,
}
sas_to_dp_with_limit_and_too_large_offset = {
'limit': '100',
'offset': '100',
'from': SAS,
'to': DP,
}
scenarios = [
(sas_to_dp_end_date_only, operator.eq, 0),
(sas_to_dp_begin_date_only, operator.gt, 3),
(sas_to_dp_end_date_too_early, operator.eq, 0),
(dp_to_sas, operator.gt, 0),
(dp_to_sas_incorrect_serial_number, operator.eq, 0),
(sas_to_dp_with_limit, operator.gt, 3),
(sas_to_dp_with_limit_and_too_large_offset, operator.eq, 0),
]
self.given_cbsd_provisioned(builder)
with self.while_cbsd_is_active():
self._verify_logs_count(scenarios)
def given_cbsd_provisioned(self, builder: CbsdAPIDataBuilder) -> int:
self.when_cbsd_is_created(builder.build_post_data())
cbsd = self.when_cbsd_is_fetched(builder.serial_number)
self.then_cbsd_is(cbsd, builder.build_unregistered_data())
state = self.when_cbsd_asks_for_state()
self.then_state_is(state, get_empty_state())
self.then_state_is_eventually(builder.build_grant_state_data())
cbsd = self.when_cbsd_is_fetched(builder.serial_number)
self.then_cbsd_is(cbsd, builder.build_registered_active_data())
return cbsd['id']
def when_cbsd_is_created(self, data: Dict[str, Any], expected_status: int = HTTPStatus.CREATED):
r = send_request_to_backend('post', 'cbsds', json=data)
self.assertEqual(r.status_code, expected_status)
def when_cbsd_is_fetched(self, serial_number: str = None) -> Dict[str, Any]:
return self._check_for_cbsd(serial_number=serial_number)
def when_logs_are_fetched(self, params: Dict[str, Any]) -> Dict[str, Any]:
r = send_request_to_backend('get', 'logs', params=params)
self.assertEqual(r.status_code, HTTPStatus.OK)
data = r.json()
return data
def when_cbsd_is_deleted(self, cbsd_id: int):
r = send_request_to_backend('delete', f'cbsds/{cbsd_id}')
self.assertEqual(r.status_code, HTTPStatus.NO_CONTENT)
def when_cbsd_is_updated(self, cbsd_id: int, data: Dict[str, Any], expected_status: int = HTTPStatus.NO_CONTENT):
r = send_request_to_backend('put', f'cbsds/{cbsd_id}', json=data)
self.assertEqual(r.status_code, expected_status)
def when_cbsd_is_deregistered(self, cbsd_id: int):
r = send_request_to_backend('post', f'cbsds/{cbsd_id}/deregister')
self.assertEqual(r.status_code, HTTPStatus.NO_CONTENT)
def when_cbsd_asks_for_state(self) -> CBSDStateResult:
return self.dp_client.GetCBSDState(get_cbsd_request(self.serial_number))
@staticmethod
def when_cbsd_is_inactive():
inactivity = 3
polling = 1
delta = 3
total_wait_time = inactivity + 2 * polling + delta
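# with the values above this sleeps for 3 + 2 * 1 + 3 = 8 seconds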
sleep(total_wait_time)
@contextmanager
def while_cbsd_is_active(self):
done = Event()
def keep_asking_for_state():
while not done.wait(timeout=1):
self.when_cbsd_asks_for_state()
t = Thread(target=keep_asking_for_state)
try:
t.start()
yield
finally:
done.set()
t.join()
def then_cbsd_is(self, actual: Dict[str, Any], expected: Dict[str, Any]):
actual = actual.copy()
del actual['id']
grant = actual.get('grant')
if grant:
del grant['grant_expire_time']
del grant['transmit_expire_time']
self.assertEqual(actual, expected)
def then_cbsd_is_deleted(self, serial_number: str):
self._check_for_cbsd(serial_number=serial_number, should_exist=False)
def then_state_is(self, actual: CBSDStateResult, expected: CBSDStateResult):
self.assertEqual(actual, expected)
@retry(stop_max_attempt_number=30, wait_fixed=1000)
def then_state_is_eventually(self, expected):
actual = self.when_cbsd_asks_for_state()
self.then_state_is(actual, expected)
@retry(stop_max_attempt_number=30, wait_fixed=1000)
def then_logs_are(self, filters: Dict[str, Any], expected: List[str]):
actual = self.when_logs_are_fetched(filters)
actual = [x['type'] for x in actual['logs']]
self.assertEqual(actual, expected)
@retry(stop_max_attempt_number=60, wait_fixed=1000)
def then_message_is_eventually_sent(self, filters: Dict[str, Any]):
logs = self.when_logs_are_fetched(filters)
self.assertEqual(logs["total_count"], 1)
def delete_cbsd(self, cbsd_id: int):
filters = get_filters_for_request_type('deregistration', self.serial_number)
self.when_cbsd_is_deleted(cbsd_id)
self.then_cbsd_is_deleted(self.serial_number)
state = self.when_cbsd_asks_for_state()
self.then_state_is(state, get_empty_state())
self.then_message_is_eventually_sent(filters)
@staticmethod
def get_sas_provision_messages() -> List[str]:
names = ['heartbeat', 'grant', 'spectrumInquiry', 'registration']
return [f'{x}Response' for x in names]
@retry(stop_max_attempt_number=30, wait_fixed=1000)
def _verify_logs_count(self, scenarios):
for params in scenarios:
using_filters, _operator, expected_count = params
logs = self.when_logs_are_fetched(using_filters)
logs_len = len(logs["logs"])
comparison = _operator(logs_len, expected_count)
self.assertTrue(comparison)
def _check_for_cbsd(self, serial_number: str, should_exist: bool = True) -> Optional[Dict[str, Any]]:
params = {'serial_number': serial_number}
expected_count = 1 if should_exist else 0
r = send_request_to_backend('get', 'cbsds', params=params)
self.assertEqual(r.status_code, HTTPStatus.OK)
data = r.json()
total_count = data.get('total_count')
self.assertEqual(total_count, expected_count)
cbsds = data.get('cbsds', [])
self.assertEqual(len(cbsds), expected_count)
if should_exist:
return cbsds[0]
def get_current_sas_filters(serial_number: str) -> Dict[str, Any]:
return {
'serial_number': serial_number,
'from': SAS,
'to': DP,
'end': now(),
}
def get_filters_for_request_type(request_type: str, serial_number: str) -> Dict[str, Any]:
return {
'serial_number': serial_number,
'type': f'{request_type}Response',
'begin': now(),
}
def get_empty_state() -> CBSDStateResult:
return CBSDStateResult(radio_enabled=False)
def get_cbsd_request(serial_number: str) -> CBSDRequest:
return CBSDRequest(serial_number=serial_number)
def now() -> str:
return datetime.now(timezone.utc).isoformat()
def send_request_to_backend(
method: str, url_suffix: str, params: Optional[Dict[str, Any]] = None,
json: Optional[Dict[str, Any]] = None,
) -> requests.Response:
return requests.request(
method,
f'{config.HTTP_SERVER}/{DP_HTTP_PREFIX}/{NETWORK}/{url_suffix}',
cert=(config.DP_CERT_PATH, config.DP_SSL_KEY_PATH),
verify=False, # noqa: S501
params=params,
json=json,
)
class CbsdAPIDataBuilder:
def __init__(self):
self.serial_number = str(uuid4())
self.fcc_id = SOME_FCC_ID
self.preferred_bandwidth_mhz = 20
self.preferred_frequencies_mhz = []
self.frequency_mhz = 3625
self.bandwidth_mhz = 10
self.max_eirp = 28
self.desired_state = 'registered'
def with_serial_number(self, serial_number: str) -> CbsdAPIDataBuilder:
self.serial_number = serial_number
return self
def with_fcc_id(self, fcc_id: str) -> CbsdAPIDataBuilder:
self.fcc_id = fcc_id
return self
def with_frequency_preferences(self, bandwidth_mhz: int, frequencies_mhz: List[int]) -> CbsdAPIDataBuilder:
self.preferred_bandwidth_mhz = bandwidth_mhz
self.preferred_frequencies_mhz = frequencies_mhz
return self
def with_desired_state(self, desired_state: str) -> CbsdAPIDataBuilder:
self.desired_state = desired_state
return self
def with_expected_grant(self, bandwidth_mhz: int, frequency_mhz: int, max_eirp: int) -> CbsdAPIDataBuilder:
self.bandwidth_mhz = bandwidth_mhz
self.frequency_mhz = frequency_mhz
self.max_eirp = max_eirp
return self
def build_post_data(self) -> Dict[str, Any]:
return {
'capabilities': {
'antenna_gain': 15,
'max_power': 20,
'min_power': 0,
'number_of_antennas': 2,
},
'frequency_preferences': {
'bandwidth_mhz': self.preferred_bandwidth_mhz,
'frequencies_mhz': self.preferred_frequencies_mhz,
},
'fcc_id': self.fcc_id,
'serial_number': self.serial_number,
'user_id': USER_ID,
'desired_state': self.desired_state,
"single_step_enabled": False,
"cbsd_category": "b",
}
def build_unregistered_single_step_data(self):
data = self.build_unregistered_data()
data.update({
'single_step_enabled': True,
'cbsd_category': 'a',
})
return data
def build_unregistered_data(self) -> Dict[str, Any]:
data = self.build_post_data()
data.update({
'is_active': False,
'state': 'unregistered',
})
return data
def build_registered_inactive_data(self) -> Dict[str, Any]:
data = self.build_post_data()
data.update({
'cbsd_id': f'{self.fcc_id}/{self.serial_number}',
'is_active': False,
'state': 'registered',
})
return data
def build_registered_active_data(self) -> Dict[str, Any]:
data = self.build_registered_inactive_data()
data.update({
'is_active': True,
'grant': {
'bandwidth_mhz': self.bandwidth_mhz,
'frequency_mhz': self.frequency_mhz,
'max_eirp': self.max_eirp,
'state': 'authorized',
},
})
return data
def build_grant_state_data(self) -> CBSDStateResult:
frequency_hz = int(1e6) * self.frequency_mhz
half_bandwidth_hz = int(5e5) * self.bandwidth_mhz
return CBSDStateResult(
radio_enabled=True,
channel=LteChannel(
low_frequency_hz=frequency_hz - half_bandwidth_hz,
high_frequency_hz=frequency_hz + half_bandwidth_hz,
max_eirp_dbm_mhz=self.max_eirp,
),
)
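# Worked example for build_grant_state_data with the builder defaults above:
# frequency_mhz=3625 and bandwidth_mhz=10 give frequency_hz=3.625e9 and
# half_bandwidth_hz=5e6, i.e. an authorized channel from 3.620 GHz to
# 3.630 GHz with max_eirp_dbm_mhz=28.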
|
UPS_Main3.py
|
# Universal Power System Controller
# USAID Middle East Water Security Initiative
#
# Developed by: Nathan Webster
# Primary Investigator: Nathan Johnson
#
# Version History (mm_dd_yyyy)
# 1.00 07_13_2018_NW
#
######################################################
# Import Libraries
from threading import Thread
from PWM_Controller import *
from Protection_Controller import *
from VFD_Controller import *
from SCIP_Controller import *
from SQL_Database_Controller import *
from Archive_Controller import *
from Initialization import *
from VFD_Modbus_Wrapper import *
from VFD_Modbus_Registers import *
import time
import Parameters
# Declare Variables
# Run initialization to setup VFD and converter controls
Run_Initialization()
# UPS Control Threads
#PWM_Thread = Thread(target=PWM_Controller_Main, args=("",))
#Protection_Thread = Thread(target=Protection_Controller_Main, args=("",))
#VFD_Thread = Thread(target=VFD_Controller_Main, args=("",))
#SCIP_Thread = Thread(target=SCIP_Controller_Main, args=("",))
#SQL_Thread = Thread(target=SQL_Database_Controller_Main, args=("",))
#Archive_Thread = Thread(target=Archive_Controller_Main,args=("",))
#PWM_Thread.start()
#Protection_Thread.start()
#VFD_Thread.start()
#SCIP_Thread.start()
#SQL_Thread.start()
#Archive_Thread.start()
VFD.VFDWrite(reg.get("WriteFunc", {}).get("Motor_Start_Stop"), 1)
time.sleep(5)
VFD.VFDWrite(reg.get("WriteFunc", {}).get("Frequency_Set"), int(10*100))
time.sleep(5)
VFD.VFDWrite(reg.get("WriteFunc", {}).get("Frequency_Set"), int(20*100))
time.sleep(5)
VFD.VFDWrite(reg.get("WriteFunc", {}).get("Frequency_Set"), int(30*100))
time.sleep(5)
VFD.VFDWrite(reg.get("WriteFunc", {}).get("Frequency_Set"), int(35*100))
VFD.VFDWrite(reg.get("WriteFunc", {}).get("Motor_Start_Stop"), 3)
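# Hypothetical helper (not called above): the same ramp expressed as a loop.
# It assumes the VFD wrapper, register map and time module imported earlier;
# frequencies are in Hz and scaled by 100 as the Frequency_Set register expects.
def ramp_frequency(vfd, registers, set_points_hz, dwell_s=5):
    for freq_hz in set_points_hz:
        vfd.VFDWrite(registers.get("WriteFunc", {}).get("Frequency_Set"), int(freq_hz * 100))
        time.sleep(dwell_s)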
|
api.py
|
# a few necessary imports
import json
from flask import Flask, make_response
from tendo import singleton
from multiprocessing import Process
import random
# first of all we create the Flask object
api_app = Flask(__name__)
# now we create an API entry point that returns a random passage from the list below
@api_app.route('/output', methods=['GET'])
def share_output():
output1 = {"count": "16","text": "There are many variations of passages of Lorem Ipsum available, but the majority have suffered alteration in some form, by injected humour, or randomised words which don't look even slightly believable. If you are going to use a passage of Lorem Ipsum, you need to be sure there isn't anything embarrassing hidden in the middle of text."}
output2 = {"count": "51","text": "All the Lorem Ipsum generators on the Internet tend to repeat predefined chunks as necessary, making this the first true generator on the Internet."}
output3 = {"count": "15","text": "It is a long established fact that a reader will be distracted by the readable content of a page when looking at its layout."}
output4 = {"count": "22","text": "Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old."}
output5 = {"count": "61","text": "If you are going to use a passage of Lorem Ipsum, you need to be sure there isn't anything embarrassing hidden in the middle of text."}
output6 = {"count": "35","text": "It uses a dictionary of over 200 Latin words, combined with a handful of model sentence structures, to generate Lorem Ipsum which looks reasonable. The generated Lorem Ipsum is therefore always free from repetition, injected humour, or non-characteristic words etc."}
output7 = {"count": "56","text": "This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, Lorem ipsum dolor sit amet.., comes from a line in section 1.10.32."}
output8 = {"count": "43","text": "Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book."}
output9 = {"count": "26","text": "It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged."}
output10 = {"count": "76","text": "The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from de Finibus Bonorum et Malorum by Cicero are also reproduced in their exact original form"}
outputs = [output1,output2,output3,output4,output5,output6,output7,output8,output9,output10]
result = random.sample(outputs, 1)
data = json.dumps(result)
return data
def run():
# first we check if there are other instances running
myself = singleton.SingleInstance()
# now we populate the local copy of galaxy upon start
populate_process = Process(target=share_output)
populate_process.start()
# finally we run the api service as a daemon
api_app.run(host="0.0.0.0", port=80, threaded=True)
if __name__ == '__main__':
run()
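# Hypothetical client-side sketch (not part of this service): fetch one random
# passage from the /output endpoint started above.
#
#   import requests
#   resp = requests.get('http://localhost:80/output')
#   passage = resp.json()[0]  # random.sample returns a single-element list
#   print(passage['count'], passage['text'])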
|
leaser.py
|
# Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import collections
import copy
import logging
import random
import threading
import time
import six
from google.cloud.pubsub_v1.subscriber._protocol import requests
_LOGGER = logging.getLogger(__name__)
_LEASE_WORKER_NAME = 'Thread-LeaseMaintainer'
_LeasedMessage = collections.namedtuple(
'_LeasedMessage',
['added_time', 'size'])
class Leaser(object):
def __init__(self, subscriber):
self._thread = None
self._subscriber = subscriber
self._leased_messages = {}
"""dict[str, float]: A mapping of ack IDs to the local time when the
ack ID was initially leased in seconds since the epoch."""
self._bytes = 0
"""int: The total number of bytes consumed by leased messages."""
self._stop_event = threading.Event()
@property
def message_count(self):
"""int: The number of leased messages."""
return len(self._leased_messages)
@property
def ack_ids(self):
"""Sequence[str]: The ack IDs of all leased messages."""
return self._leased_messages.keys()
@property
def bytes(self):
"""int: The total size, in bytes, of all leased messages."""
return self._bytes
def add(self, items):
"""Add messages to be managed by the leaser."""
for item in items:
# Add the ack ID to the set of managed ack IDs, and increment
# the size counter.
if item.ack_id not in self._leased_messages:
self._leased_messages[item.ack_id] = _LeasedMessage(
added_time=time.time(),
size=item.byte_size)
self._bytes += item.byte_size
else:
_LOGGER.debug(
'Message %s is already lease managed', item.ack_id)
def remove(self, items):
"""Remove messages from lease management."""
# Remove the ack ID from lease management, and decrement the
# byte counter.
for item in items:
if self._leased_messages.pop(item.ack_id, None) is not None:
self._bytes -= item.byte_size
else:
_LOGGER.debug('Item %s was not managed.', item.ack_id)
if self._bytes < 0:
_LOGGER.debug(
'Bytes was unexpectedly negative: %d', self._bytes)
self._bytes = 0
def maintain_leases(self):
"""Maintain all of the leases being managed by the subscriber.
This method modifies the ack deadline for all of the managed
ack IDs, then waits for most of that time (but with jitter), and
repeats.
"""
while self._subscriber.is_active and not self._stop_event.is_set():
# Determine the appropriate duration for the lease. This is
# based off of how long previous messages have taken to ack, with
# a sensible default and within the ranges allowed by Pub/Sub.
p99 = self._subscriber.ack_histogram.percentile(99)
_LOGGER.debug('The current p99 value is %d seconds.', p99)
# Make a copy of the leased messages. This is needed because it's
# possible for another thread to modify the dictionary while
# we're iterating over it.
leased_messages = copy.copy(self._leased_messages)
# Drop any leases that are well beyond max lease time. This
# ensures that in the event of a badly behaving actor, we can
# drop messages and allow Pub/Sub to resend them.
cutoff = (
time.time() -
self._subscriber.flow_control.max_lease_duration)
to_drop = [
requests.DropRequest(ack_id, item.size)
for ack_id, item
in six.iteritems(leased_messages)
if item.added_time < cutoff]
if to_drop:
_LOGGER.warning(
'Dropping %s items because they were leased too long.',
len(to_drop))
self._subscriber.drop(to_drop)
# Remove dropped items from our copy of the leased messages (they
# have already been removed from the real one by
# self._subscriber.drop(), which calls self.remove()).
for item in to_drop:
leased_messages.pop(item.ack_id)
# Create a streaming pull request.
# We do not actually call `modify_ack_deadline` over and over
# because it is more efficient to make a single request.
ack_ids = leased_messages.keys()
if ack_ids:
_LOGGER.debug('Renewing lease for %d ack IDs.', len(ack_ids))
# NOTE: This may not work as expected if ``consumer.active``
# has changed since we checked it. An implementation
# without any sort of race condition would require a
# way for ``send_request`` to fail when the consumer
# is inactive.
self._subscriber.modify_ack_deadline([
requests.ModAckRequest(ack_id, p99) for ack_id in ack_ids])
# Now wait an appropriate period of time and do this again.
#
# We determine the appropriate period of time based on a random
# period between 0 seconds and 90% of the lease. This use of
# jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases
# where there are many clients.
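# For example, with a p99 of 10 seconds the snooze below is drawn
# uniformly from 0 to 9 seconds.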
snooze = random.uniform(0.0, p99 * 0.9)
_LOGGER.debug('Snoozing lease management for %f seconds.', snooze)
time.sleep(snooze)
_LOGGER.info('%s exiting.', _LEASE_WORKER_NAME)
def start(self):
if self._thread is not None:
raise ValueError('Leaser is already running.')
# Create and start the helper thread.
self._stop_event.clear()
thread = threading.Thread(
name=_LEASE_WORKER_NAME,
target=self.maintain_leases)
thread.daemon = True
thread.start()
_LOGGER.debug('Started helper thread %s', thread.name)
self._thread = thread
def stop(self):
self._stop_event.set()
if self._thread is not None:
# The thread should automatically exit when the consumer is
# inactive.
self._thread.join()
self._thread = None
|
process.py
|
import signal
from threading import Thread
from subprocess import Popen, PIPE, STDOUT, TimeoutExpired
import logging
from lib.helper import *
class Process:
def fpipe(self):
try:
for line in self.proc.stdout:
self.logger.info(line.decode("utf-8").rstrip())
self.logfile.write(line)
except ValueError:
pass
def __init__(self, name, log, proc_args, stdout_print=False, proc_kwargs={}, must_kill=False):
self.name = name
self.log = log
self.proc_args = proc_args
self.proc_kwargs = proc_kwargs
self.must_kill = must_kill
self.logfile = None
self.proc = None
self.tpipe = None
self.logger = create_logger(
self.name,
file=False,
level=logging.INFO if stdout_print else logging.ERROR,
print_level=False,
)
def __enter__(self):
self.logfile = open(self.log, "wb")
self.proc = Popen(
self.proc_args,
**self.proc_kwargs,
stdout=PIPE,
stderr=STDOUT,
)
self.tpipe = Thread(target=self.fpipe)
self.tpipe.start()
return self
def kill(self):
self.proc.kill()
def __exit__(self, *args):
if self.must_kill:
self.kill()
else:
self.proc.terminate()
try:
self.proc.wait(timeout=1.0)
except TimeoutExpired:
self.kill()
self.logfile.close()
#self.tpipe.join()
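# Hypothetical usage sketch (command and log path are made up): stream a
# child's combined stdout/stderr to both the logger and a log file.
#
#   with Process('ping', 'ping.log', ['ping', '-c', '3', 'localhost'],
#                stdout_print=True):
#       pass  # on exit the child is terminated (or killed if must_kill=True)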
|
train_faster_rcnn_alt_opt.py
|
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Faster R-CNN network using alternating optimization.
This tool implements the alternating optimization algorithm described in our
NIPS 2015 paper ("Faster R-CNN: Towards Real-time Object Detection with Region
Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.)
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Faster R-CNN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--net_name', dest='net_name',
help='network name (e.g., "ZF")',
default=None, type=str)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def get_roidb(imdb_name, rpn_file=None):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
if rpn_file is not None:
imdb.config['rpn_file'] = rpn_file
roidb = get_training_roidb(imdb)
return roidb, imdb
def get_solvers(net_name):
# Faster R-CNN Alternating Optimization
n = 'faster_rcnn_alt_opt'
# Solver for each training stage
solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
[net_name, n, 'stage1_fast_rcnn_solver30k40k.pt'],
[net_name, n, 'stage2_rpn_solver60k80k.pt'],
[net_name, n, 'stage2_fast_rcnn_solver30k40k.pt']]
solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
# Iterations for each training stage
max_iters = [180000, 90000, 180000, 90000]
# max_iters = [100, 100, 100, 100]
# Test prototxt for the RPN
rpn_test_prototxt = os.path.join(
cfg.MODELS_DIR, net_name, n, 'rpn_test.pt')
return solvers, max_iters, rpn_test_prototxt
# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
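# A minimal sketch of the per-stage pattern used in __main__ below
# (hypothetical helper, not called by the original script): run a stage in its
# own process so that GPU memory is released when the process exits, and read
# the stage's result back through the shared multiprocessing queue.
def _run_stage(target, kwargs, queue):
    p = mp.Process(target=target, kwargs=kwargs)
    p.start()
    result = queue.get()  # e.g. {'model_path': ...} or {'proposal_path': ...}
    p.join()
    return result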
def _init_caffe(cfg):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None):
"""Train a Region Proposal Network in a separate training process.
"""
# Not using any proposals, just ground-truth boxes
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to Fast R-CNN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
print 'Init model: {}'.format(init_model)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name)
print 'roidb len: {}'.format(len(roidb))
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rpn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
queue.put({'model_path': rpn_model_path})
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
rpn_test_prototxt=None):
"""Use a trained RPN to generate proposals.
"""
cfg.TEST.RPN_PRE_NMS_TOP_N = -1 # no pre NMS filtering
cfg.TEST.RPN_POST_NMS_TOP_N = 2000 # limit top boxes after NMS
print 'RPN model: {}'.format(rpn_model_path)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
# NOTE: the matlab implementation computes proposals on flipped images, too.
# We compute them on the image once and then flip the already computed
# proposals. This might cause a minor loss in mAP (less proposal jittering).
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Generate proposals on the imdb
rpn_proposals = imdb_proposals(rpn_net, imdb)
# Write proposals to disk and send the proposal file path through the
# multiprocessing queue
rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
rpn_proposals_path = os.path.join(
output_dir, rpn_net_name + '_proposals.pkl')
with open(rpn_proposals_path, 'wb') as f:
cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
queue.put({'proposal_path': rpn_proposals_path})
def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, rpn_file=None):
"""Train a Fast R-CNN using proposals generated by an RPN.
"""
cfg.TRAIN.HAS_RPN = False # not generating proposals on-the-fly
cfg.TRAIN.PROPOSAL_METHOD = 'rpn' # use pre-computed RPN proposals instead
cfg.TRAIN.IMS_PER_BATCH = 2
print 'Init model: {}'.format(init_model)
print 'RPN proposals: {}'.format(rpn_file)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Train Fast R-CNN
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
fast_rcnn_model_path = model_paths[-1]
# Send Fast R-CNN model path over the multiprocessing queue
queue.put({'model_path': fast_rcnn_model_path})
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
# --------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are
# discarded (e.g. "del net" in Python code). To work around this issue, each
# training stage is executed in a separate process using
# multiprocessing.Process.
# --------------------------------------------------------------------------
# queue for communicating results between processes
mp_queue = mp.Queue()
# solvers, iters, etc. for each training stage
solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[0],
max_iters=max_iters[0],
cfg=cfg)
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[1],
max_iters=max_iters[1],
cfg=cfg,
rpn_file=rpn_stage1_out['proposal_path'])
p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
p.start()
fast_rcnn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, init from stage 1 Fast R-CNN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(fast_rcnn_stage1_out['model_path']),
solver=solvers[2],
max_iters=max_iters[2],
cfg=cfg)
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage2_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage2_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 Fast R-CNN, init from stage 2 RPN R-CNN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(rpn_stage2_out['model_path']),
solver=solvers[3],
max_iters=max_iters[3],
cfg=cfg,
rpn_file=rpn_stage2_out['proposal_path'])
p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
p.start()
fast_rcnn_stage2_out = mp_queue.get()
p.join()
# Create final model (just a copy of the last stage)
final_path = os.path.join(
os.path.dirname(fast_rcnn_stage2_out['model_path']),
args.net_name + '_faster_rcnn_final.caffemodel')
print 'cp {} -> {}'.format(
fast_rcnn_stage2_out['model_path'], final_path)
shutil.copy(fast_rcnn_stage2_out['model_path'], final_path)
print 'Final model: {}'.format(final_path)
|
ADS1299_API.py
|
"""
# file: ADS1299_API.py
# author: Frederic Simard ([email protected])
# version: Fall 2017
# descr: This file implements the basic features required to operate the ADS1299 using the SPI port
of a Raspberry Pi (tested on RPi 3, Raspbian Lite Jessie).
The API handles the communication over the SPI port and uses a separate thread - managed by GPIO -
to process samples sent by the ADS1299. Samples received are pushed to a registered callback in
the form of a numpy Array with a length equal to the number of channels (think of observer pattern).
A default Callback that prints out values on screen is provided in this file and registered in the test script.
A stubbed mode is also available to develop with the API offline, in that mode random numbers are
returned at a rate close to the defined sampling rate. Stubbed mode becomes active whenever spidev
cannot be imported properly.
Public methods overview:
Basic operations:
- init, initialise the API
- openDevice, open SPI, power-up/reset sequence the ADS1299 and push default configuration
- closeDevice, close SPI, power down ADS1299
Configuration:
- configure, is the public interface to change system configuration. It uses optional parameters
- nb_channels, sets the number of channels {1,8}, default 8
- sampling_rate, sets the sampling rate {250,500,1000,2000*}, default 500
- bias_enabled, used to enable/disable Bias drive {True,False}, default True
Note: changing any option will interrupt any active stream
Note: 2000Hz sampling rate is unstable, it requires the 24 bits conversion to be done in a different thread
Note: gain is set to 24 and is not configurable; should you add this functionality, make sure to update SCALE_TO_UVOLT accordingly
- registerClient, add a callback to use for data
Control:
- startEegStreaming, starts streaming of eeg data using active configuration
- startTestStream, starts streaming of test data (generated by ADS1299)
- stopStreaming, stop any on-going stream
- reset ADS1299, toggle reset pin on ADS1299
Hardware configuration:
The Raspberry Pi 3 is used as a reference
Signal | RPi Pin | ADS Pin
--------------------------------
MOSI | 19 | DIN
MISO | 21 | DOUT
SCLK | 23 | SCLK
CS | 24 | CS
--------------------------------
START | 15 | START
RESET | 16 | nRESET
PWRDN | 18 | nPWRDN
DRDY | 22 | DRDY
The pins for the SPI port cannot be changed. CS can be flipped, if using /dev/spidev0.1 instead.
The GPIOS can be reaffected.
Requirements and setup:
- numpy: https://scipy.org/install.html
- spidev: https://pypi.python.org/pypi/spidev
- how to configure SPI on raspberry Pi: https://www.raspberrypi.org/documentation/hardware/raspberrypi/spi/README.md
Note: I had to $sudo chmod 777 /dev/spidev0.0 and reboot the raspberry pi to get access to the SPI device
"""
import struct
from threading import Semaphore, Lock, Thread
from time import time, sleep
import random
import platform
import sys
import numpy as np
STUB_SPI = False
try:
import spidev
except ImportError:
STUB_SPI = True
pass
STUB_GPIO = False
try:
import RPi.GPIO as GPIO
except:
STUB_GPIO = True
# eeg data scaling function
# adjusted from (5/Gain)/2^24, where gain is 24
# note: datasheet says 4.5 instead of 5, but this value was determined experimentally
SCALE_TO_UVOLT = 0.0000000121
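# Worked check of the note above: (5.0 / 24) / 2**24 evaluates to ~1.24e-8;
# the slightly smaller 1.21e-8 used here is the experimentally adjusted value.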
"""
# conv24bitsToFloat(unpacked)
# @brief utility function that converts signed 24 bits integer to scaled floating point
# the 24 bits representation needs to be provided as a 3 bytes array MSB first
# @param unpacked (bytes array) 24 bits data point
# @return data scaled to uVolt
# @thanks: https://github.com/OpenBCI/OpenBCI_Python/blob/master/open_bci_ganglion.py
"""
def conv24bitsToFloat(unpacked):
""" Convert 24bit data coded on 3 bytes to a proper integer """
if len(unpacked) != 3:
raise ValueError("Input should be 3 bytes long.")
# FIXME: quick'n dirty, unpack wants strings later on
literal_read = struct.pack('3B', unpacked[0], unpacked[1], unpacked[2])
# 3-byte int in two's complement
if (unpacked[0] > 127):
pre_fix = bytes(bytearray.fromhex('FF'))
else:
pre_fix = bytes(bytearray.fromhex('00'))
literal_read = pre_fix + literal_read;
# unpack big endian (>) signed integer (i) (makes unpacking platform independent)
myInt = struct.unpack('>i', literal_read)[0]
# convert to uVolt
return myInt * SCALE_TO_UVOLT
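# Illustrative sanity check (not part of the original file):
#   conv24bitsToFloat([0x7F, 0xFF, 0xFF]) ->  8388607 * 1.21e-8 ~=  0.1015
#   conv24bitsToFloat([0xFF, 0xFF, 0xFF]) ->       -1 * 1.21e-8 ~= -1.21e-8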
"""
DefaultCallback
@brief used as default client callback for tests
@data byte array of 1xN, where N is the number of channels
"""
def DefaultCallback(data):
pass
#print repr(data)
""" ADS1299 PINS """
START_PIN = 22
nRESET_PIN = 23
nPWRDN_PIN = 24
DRDY_PIN = 25
""" ADS1299 registers map """
REG_CONFIG1 = 0x01
REG_CONFIG2 = 0x02
REG_CONFIG3 = 0x03
REG_CHnSET_BASE = 0x05
REG_MISC = 0x15
REG_BIAS_SENSP = 0x0D
REG_BIAS_SENSN = 0x0E
""" ADS1299 Commands """
RDATAC = 0x10
SDATAC = 0x11
MAX_NB_CHANNELS = 8
"""
# ADS1299_API
# @brief Encapsulated API, provides basic functionnalities
# to configure and control a ADS1299 connected to the SPI port
"""
class ADS1299_API(object):
# spi port
spi = None
# thread processing inputs
stubThread = None
APIAlive = True
# lock over SPI port
spi_lock = None
# array of client handles
clientUpdateHandles = []
# device configuration
nb_channels = 8 # {1-8}
sampling_rate = 500 # {250,500,1000,2000,4000}
bias_enabled = False # {True, False}
# True when a data stream is active
stream_active = False
""" PUBLIC
# Constructor
# @brief
"""
def __init__(self):
if STUB_SPI == False:
self.spi = spidev.SpiDev()
""" PUBLIC
# openDevice
# @brief open the ADS1299 interface and initialize the chip
"""
def openDevice(self):
if STUB_SPI == False and STUB_GPIO == False:
# open and configure SPI port
self.spi.open(0, 0)
self.spi.max_speed_hz = 4000000
self.spi.mode = 0b01
# using BCM pin numbering scheme
GPIO.setmode(GPIO.BCM)
# setup control pins
GPIO.setup(START_PIN, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(nRESET_PIN, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(nPWRDN_PIN, GPIO.OUT, initial=GPIO.LOW)
# setup DRDY callback
GPIO.setup(DRDY_PIN, GPIO.IN)
GPIO.add_event_detect(DRDY_PIN, GPIO.FALLING, callback=self.drdy_callback)
else:
# setup fake data generator
print "stubbed mode"
self.APIAlive = True
self.stubThread = Thread(target=self.stubTask)
self.stubThread.start()
# spi port mutex
self.spi_lock = Lock()
# init the ADS1299
self.ADS1299StartupSequence()
return
""" PUBLIC
# closeDevice
# @brief close and clean up the SPI, GPIO and running thread
"""
def closeDevice(self):
if STUB_SPI == False and STUB_GPIO == False:
self.spi.close()
GPIO.cleanup()
self.APIAlive = False
return
""" PUBLIC
# startEegStream
# @brief Init an eeg data stream
"""
def startEegStream(self):
# stop any on-going stream
self.resetOngoingState()
# setup EEG mode
self.setupEEGMode()
self.stream_active = True
# start the stream
self.SPI_transmitByte(RDATAC)
""" PUBLIC
# startTestStream
# @brief Init a test data stream
"""
def startTestStream(self):
# stop any on-going stream
self.resetOngoingState()
# setup test mode
self.setupTestMode()
# start the stream
self.stream_active = True
self.SPI_transmitByte(RDATAC)
""" PUBLIC
# stopStream
# @brief shut down any active stream
"""
def stopStream(self):
# stop any on-going ads stream
self.SPI_transmitByte(SDATAC)
self.stream_active = False
""" PUBLIC
# registerClient
# @brief register a client handle to push data
# @param clientHandle, update handle of the client
"""
def registerClient(self, clientHandle):
self.clientUpdateHandles.append(clientHandle)
""" PUBLIC
# configure
# @brief provide the ADS1299 configuration interface, it uses optional parameters
# no parameter validation take place, make sure to provide valid value
# - nb_channels {1-8}
# - sampling_rate {250, 500, 1000, 2000, 4000}
# - bias_enabled {True, False}
"""
def configure(self, nb_channels=None, sampling_rate=None, bias_enabled=None):
self.stopStream()
if nb_channels is not None:
self.nb_channels = nb_channels
if sampling_rate is not None:
self.sampling_rate = sampling_rate
if bias_enabled is not None:
    self.bias_enabled = bias_enabled
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# ADS1299 control
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
""" PRIVATE
# ADS1299StartupSequence
# @brief start-up sequence to init the chip
"""
def ADS1299StartupSequence(self):
# pwr and reset goes up
self.setnReset(True)
self.setnPWRDN(True)
# wait
sleep(1)
# toggle reset
self.toggleReset()
# send SDATAC
self.resetOngoingState()
self.setStart(True)
self.SPI_transmitByte(RDATAC)
""" PRIVATE
# setupEEGMode
# @brief setup EEG mode for data streaming
"""
def setupEEGMode(self):
# Write CHnSET 60h (normal electrode input)
# (0) normal operation
# (110) PGA gain 24
# (0) SRB2 open
# (000) Normal operations
tx_buf = [0] * self.nb_channels
for i in xrange(0, self.nb_channels):
tx_buf[i] = 0x60;
self.SPI_writeMultipleReg(REG_CHnSET_BASE, tx_buf);
# set the MUX for SRB1 to be connected to all N pins
# MISC register (multiple single-ended electrodes)
self.SPI_writeSingleReg(REG_MISC, 0x20);
# setup bias
if self.bias_enabled:
self.setupBiasDrive()
""" PRIVATE
# setupTestMode
# @brief setup TEST mode for data streaming
"""
def setupTestMode(self):
# stop any on-going ads stream
self.SPI_transmitByte(SDATAC)
# Write CONFIG2 D0h
# (110) reserved
# (1) test signal generated internally
# (0) reserved
# (0) signal amplitude: 1 x -(VREFP - VREFN) / 2400
# (00) test signal pulsed at fCLK / 2^21
self.SPI_writeSingleReg(REG_CONFIG2, 0xD0)
# Write CHnSET 65h (connects internal test signal)
tx_buf = [0] * self.nb_channels
for i in xrange(0, self.nb_channels):
tx_buf[i] = 0x65
self.SPI_writeMultipleReg(REG_CHnSET_BASE, tx_buf)
""" PRIVATE
# resetOngoingState
# @brief reset the registers configuration
"""
def resetOngoingState(self):
# send SDATAC
self.SPI_transmitByte(SDATAC)
# setup CONFIG3 register
self.SPI_writeSingleReg(REG_CONFIG3, 0xE0)
# setup CONFIG1 register
self.setSamplingRate()
# setup CONFIG2 register
self.SPI_writeSingleReg(REG_CONFIG2, 0xC0)
# disable any bias
self.SPI_writeSingleReg(REG_BIAS_SENSP, 0x00)
self.SPI_writeSingleReg(REG_BIAS_SENSN, 0x00)
# setup CHnSET registers
tx_buf = [0] * MAX_NB_CHANNELS
for i in xrange(0, MAX_NB_CHANNELS):
# input shorted
tx_buf[i] = 0x01
self.SPI_writeMultipleReg(REG_CHnSET_BASE, tx_buf)
""" PRIVATE
# setSamplingRate
# @brief set CONFIG1 register, which defines the sampling rate
"""
def setSamplingRate(self):
temp_reg_value = 0x90 # base value
# select the sampling rate bits
if self.sampling_rate == 2000:
temp_reg_value |= 0x03
elif self.sampling_rate == 1000:
temp_reg_value |= 0x04
elif self.sampling_rate == 500:
temp_reg_value |= 0x05
else:
temp_reg_value |= 0x06
self.SPI_writeSingleReg(REG_CONFIG1, temp_reg_value)
""" PRIVATE
# setupBiasDrive
# @brief enable the bias drive by configuring the appropriate registers
# @ref ADS1299 datasheet, see figure 73, p.67
"""
def setupBiasDrive(self):
if self.bias_enabled:
temp_reg_value = 0x00
for i in xrange(0, self.nb_channels):
temp_reg_value |= 0x01 << i
self.SPI_writeSingleReg(REG_BIAS_SENSP, temp_reg_value)
self.SPI_writeSingleReg(REG_BIAS_SENSN, temp_reg_value)
self.SPI_writeSingleReg(REG_CONFIG3, 0xEC)
""" PRIVATE
# stubTask
# @brief activated in stub mode, will generate fake data
"""
def stubTask(self):
while self.APIAlive:
if self.stream_active:
for handle in self.clientUpdateHandles:
handle(np.random.rand(self.nb_channels))
sleep(1.0 / float(self.sampling_rate))
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# GPIO Interface
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
""" PRIVATE
# drdy_callback
# @brief callback triggered on DRDY falling edge. When this happens, if the stream
is active, will get all the sample from the ADS1299 and update all
clients
# @param state, state of the pin to read (not used)
"""
def drdy_callback(self, state):
# on event, read the data from ADS
# read 24 + n*24 bits or 3+n*3 bytes
bit_values = self.SPI_readMultipleBytes(3 + self.nb_channels * 3)
# skip if no stream is active
if self.stream_active == False:
return
data_array = np.zeros(self.nb_channels)
for i in xrange(0, self.nb_channels):
data_array[i] = conv24bitsToFloat(bit_values[(i * 3 + 3):((i + 1) * 3 + 3)])
# broadcast results
for handle in self.clientUpdateHandles:
handle(data_array)
""" PRIVATE
# setStart
# @brief control the START pin
# @param state, state of the pin to set
"""
def setStart(self, state):
if STUB_GPIO == False:
if state:
GPIO.output(START_PIN, GPIO.HIGH)
else:
GPIO.output(START_PIN, GPIO.LOW)
""" PRIVATE
# toggleReset
# @brief toggle the nRESET pin while respecting the timing
"""
def toggleReset(self):
# toggle reset
self.setnReset(False)
sleep(0.2)
self.setnReset(True)
sleep(0.2)
""" PRIVATE
# setnReset
# @brief control the nRESET pin
# @param state, state of the pin to set
"""
def setnReset(self, state):
if STUB_GPIO == False:
if state:
GPIO.output(nRESET_PIN, GPIO.HIGH)
else:
GPIO.output(nRESET_PIN, GPIO.LOW)
""" PRIVATE
# setnPWRDN
# @brief control the nPWRDN pin
# @param state, state of the pin to set
"""
def setnPWRDN(self, state):
if STUB_GPIO == False:
if state:
GPIO.output(nPWRDN_PIN, GPIO.HIGH)
else:
GPIO.output(nPWRDN_PIN, GPIO.LOW)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# SPI Interface
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
""" PRIVATE
# SPI_transmitByte
# @brief push a single byte on the SPI port
# @param byte, value to push on the port
"""
def SPI_transmitByte(self, byte):
if STUB_SPI == False:
self.spi_lock.acquire()
self.spi.xfer2([byte])
self.spi_lock.release()
""" PRIVATE
# SPI_writeSingleReg
# @brief write a value to a single register
# @param reg, register address to write to
# @param byte, value to write
"""
def SPI_writeSingleReg(self, reg, byte):
if STUB_SPI == False:
self.spi_lock.acquire()
self.spi.xfer2([reg | 0x40, 0x00, byte])
self.spi_lock.release()
""" PRIVATE
# SPI_writeMultipleReg
# @brief write a series of values to a series of adjacent registers
# the number of adjacent registers to write is defined by the length
# of the value array
# @param start_reg, base address from where to start writing
# @param byte_array, array of bytes containing registers values
"""
def SPI_writeMultipleReg(self, start_reg, byte_array):
if STUB_SPI == False:
tmp = [start_reg | 0x40]
tmp.append(len(byte_array) - 1)
for i in xrange(0, len(byte_array)):
tmp.append(byte_array[i])
self.spi_lock.acquire()
self.spi.xfer2(tmp)
self.spi_lock.release()
""" PRIVATE
# SPI_readMultipleBytes
# @brief read multiple bytes from the SPI port
# @param nb_bytes, nb of bytes to read
"""
def SPI_readMultipleBytes(self, nb_bytes):
r = []
if STUB_SPI == False:
self.spi_lock.acquire()
r = self.spi.xfer2([0x00] * nb_bytes)
self.spi_lock.release()
return r
def _test():
print "Starting validation sequence"
# init ads api
ads = ADS1299_API()
# init device
ads.openDevice()
# attach default callback
ads.registerClient(DefaultCallback)
# configure ads
ads.configure(sampling_rate=1000)
print "ADS1299 API test stream starting"
# begin test streaming
ads.startEegStream()
# wait
sleep(10)
print "ADS1299 API test stream stopping"
# stop device
ads.stopStream()
# clean up
ads.closeDevice()
sleep(1)
print "Test Over"
if __name__ == "__main__":
_test()
|
analysis_submission.py
|
#####################################################################
# #
# /analysis_submission.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program BLACS, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
import logging
import os
import Queue
import threading
import time
import sys
from qtutils.qt.QtCore import *
from qtutils.qt.QtGui import *
from qtutils.qt.QtWidgets import *
from qtutils import *
from zprocess import zmq_get, TimeoutError, raise_exception_in_thread
from socket import gaierror
import labscript_utils.shared_drive
from labscript_utils.qtwidgets.elide_label import elide_label
class AnalysisSubmission(object):
def __init__(self, BLACS, blacs_ui):
self.inqueue = Queue.Queue()
self.BLACS = BLACS
self.port = int(self.BLACS.exp_config.get('ports', 'lyse'))
self._ui = UiLoader().load(os.path.join(os.path.dirname(os.path.realpath(__file__)),'analysis_submission.ui'))
blacs_ui.analysis.addWidget(self._ui)
self._ui.frame.setMinimumWidth(blacs_ui.queue_controls_frame.sizeHint().width())
elide_label(self._ui.resend_shots_label, self._ui.failed_to_send_frame.layout(), Qt.ElideRight)
# connect signals
self._ui.send_to_server.toggled.connect(lambda state: self._set_send_to_server(state))
self._ui.server.editingFinished.connect(lambda: self._set_server(self._ui.server.text()))
self._ui.clear_unsent_shots_button.clicked.connect(lambda _: self.clear_waiting_files())
self._ui.retry_button.clicked.connect(lambda _: self.check_retry())
self._waiting_for_submission = []
self.server_online = 'offline'
self.send_to_server = False
self.server = ''
self.time_of_last_connectivity_check = 0
self.mainloop_thread = threading.Thread(target=self.mainloop)
self.mainloop_thread.daemon = True
self.mainloop_thread.start()
# self.checking_thread = threading.Thread(target=self.check_connectivity_loop)
# self.checking_thread.daemon = True
# self.checking_thread.start()
def restore_save_data(self,data):
if "server" in data:
self.server = data["server"]
if "send_to_server" in data:
self.send_to_server = data["send_to_server"]
if "waiting_for_submission" in data:
self._waiting_for_submission = list(data["waiting_for_submission"])
self.inqueue.put(['save data restored', None])
self.check_retry()
def get_save_data(self):
return {"waiting_for_submission":list(self._waiting_for_submission),
"server":self.server,
"send_to_server":self.send_to_server
}
def _set_send_to_server(self,value):
self.send_to_server = value
def _set_server(self,server):
self.server = server
self.check_retry()
@property
@inmain_decorator(True)
def send_to_server(self):
return self._send_to_server
@send_to_server.setter
@inmain_decorator(True)
def send_to_server(self, value):
self._send_to_server = bool(value)
self._ui.send_to_server.setChecked(self.send_to_server)
if self.send_to_server:
self._ui.server.setEnabled(True)
self._ui.server_online.show()
self.check_retry()
else:
self.clear_waiting_files()
self._ui.server.setEnabled(False)
self._ui.server_online.hide()
@property
@inmain_decorator(True)
def server(self):
return str(self._server)
@server.setter
@inmain_decorator(True)
def server(self,value):
self._server = value
self._ui.server.setText(self.server)
@property
@inmain_decorator(True)
def server_online(self):
return self._server_online
@server_online.setter
@inmain_decorator(True)
def server_online(self,value):
self._server_online = str(value)
icon_names = {'checking': ':/qtutils/fugue/hourglass',
'online': ':/qtutils/fugue/tick',
'offline': ':/qtutils/fugue/exclamation',
'': ':/qtutils/fugue/status-offline'}
tooltips = {'checking': 'Checking...',
'online': 'Server is responding',
'offline': 'Server not responding',
'': 'Disabled'}
icon = QIcon(icon_names.get(self._server_online, ':/qtutils/fugue/exclamation-red'))
pixmap = icon.pixmap(QSize(16, 16))
tooltip = tooltips.get(self._server_online, "Invalid server status: %s" % self._server_online)
# Update GUI:
self._ui.server_online.setPixmap(pixmap)
self._ui.server_online.setToolTip(tooltip)
self.update_waiting_files_message()
@inmain_decorator(True)
def update_waiting_files_message(self):
# if there is only one shot and we haven't encountered failure yet, do
# not show the error frame:
if (self.server_online == 'checking') and (len(self._waiting_for_submission) == 1) and not self._ui.failed_to_send_frame.isVisible():
return
if self._waiting_for_submission:
self._ui.failed_to_send_frame.show()
if self.server_online == 'checking':
self._ui.retry_button.hide()
text = 'Sending %s shot(s)...' % len(self._waiting_for_submission)
else:
self._ui.retry_button.show()
text = '%s shot(s) to send' % len(self._waiting_for_submission)
self._ui.resend_shots_label.setText(text)
else:
self._ui.failed_to_send_frame.hide()
def get_queue(self):
return self.inqueue
@inmain_decorator(True)
def clear_waiting_files(self):
self._waiting_for_submission = []
self.update_waiting_files_message()
@inmain_decorator(True)
def check_retry(self):
self.inqueue.put(['check/retry', None])
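# The mainloop below consumes (signal, data) tuples from self.inqueue:
# 'save data restored' unblocks startup, 'check/retry' probes the lyse server
# and resubmits queued shots, 'file' queues a new shot file for submission,
# and 'close' terminates the thread.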
def mainloop(self):
self._mainloop_logger = logging.getLogger('BLACS.AnalysisSubmission.mainloop')
# Ignore signals until save data is restored:
while self.inqueue.get()[0] != 'save data restored':
pass
timeout = 10
while True:
try:
try:
signal, data = self.inqueue.get(timeout=timeout)
except Queue.Empty:
timeout = 10
# Periodic checking of connectivity and resending of files.
# Don't trigger a re-check if we already failed a connectivity
# check within the last second:
if (time.time() - self.time_of_last_connectivity_check) > 1:
signal = 'check/retry'
else:
continue
if signal == 'check/retry':
self.check_connectivity()
if self.server_online == 'online':
self.submit_waiting_files()
elif signal == 'file':
if self.send_to_server:
self._waiting_for_submission.append(data)
if self.server_online != 'online':
# Don't stack connectivity checks if many files are
# arriving. If we failed a connectivity check less
# than a second ago then don't check again.
if (time.time() - self.time_of_last_connectivity_check) > 1:
self.check_connectivity()
else:
# But do queue up a check for when we have
# been idle for one second:
timeout = 1
if self.server_online == 'online':
self.submit_waiting_files()
elif signal == 'close':
break
elif signal == 'save data restored':
continue
else:
raise ValueError('Invalid signal: %s'%str(signal))
self._mainloop_logger.info('Processed signal: %s'%str(signal))
except Exception:
# Raise in a thread for visibility, but keep going
raise_exception_in_thread(sys.exc_info())
self._mainloop_logger.exception("Exception in mainloop, continuing")
def check_connectivity(self):
host = self.server
send_to_server = self.send_to_server
if host and send_to_server:
self.server_online = 'checking'
try:
response = zmq_get(self.port, host, 'hello', timeout=1)
except (TimeoutError, gaierror):
success = False
else:
success = (response == 'hello')
# update GUI
self.server_online = 'online' if success else 'offline'
else:
self.server_online = ''
self.time_of_last_connectivity_check = time.time()
def submit_waiting_files(self):
success = True
while self._waiting_for_submission and success:
path = self._waiting_for_submission[0]
self._mainloop_logger.info('Submitting run file %s.\n'%os.path.basename(path))
data = {'filepath': labscript_utils.shared_drive.path_to_agnostic(path)}
self.server_online = 'checking'
try:
response = zmq_get(self.port, self.server, data, timeout=1)
except (TimeoutError, gaierror):
success = False
else:
success = (response == 'added successfully')
try:
self._waiting_for_submission.pop(0)
except IndexError:
# Queue has been cleared
pass
if not success:
break
# update GUI
self.server_online = 'online' if success else 'offline'
self.time_of_last_connectivity_check = time.time()
|
test_logging.py
|
# Copyright 2001-2017 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok
from test import support
import textwrap
import time
import unittest
import warnings
import weakref
try:
import threading
# The following imports are needed only for tests which
# require threading
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
except ImportError:
threading = None
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
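# For example, logging.getLogger("ERR").error("2") is rendered by log_format
# as "ERR -> ERROR: 2", which expected_log_pat captures as the tuple
# ('ERR', 'ERROR', '2') checked by assert_log_lines below.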
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
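# --- Illustrative sketch (not part of the original test suite) ---
# BasicFilterTest above exercises logging.Filter's name-prefix semantics: a
# Filter("spam.eggs") passes records from "spam.eggs" and its descendants
# (e.g. "spam.eggs.fish") but not from siblings such as "spam.bakedbeans".
# The helper below is hypothetical and is not referenced by any test.
def _sketch_name_filter():
    flt = logging.Filter("spam.eggs")
    names = ["spam", "spam.eggs", "spam.eggs.fish", "spam.bakedbeans"]
    # Expected result: only "spam.eggs" and "spam.eggs.fish" map to True.
    return {name: bool(flt.filter(logging.makeLogRecord({'name': name})))
            for name in names}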
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
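# --- Illustrative sketch (not part of the original test suite) ---
# Outside of CustomLevelsAndFiltersTest.setUp below, the custom levels above
# would typically be registered once with logging.addLevelName() and then used
# via Logger.log(). The logger name "demo.verbosity" is hypothetical and this
# helper is not called by the tests.
def _sketch_custom_levels():
    for level, name in my_logging_levels.items():
        logging.addLevelName(level, name)
    demo = logging.getLogger("demo.verbosity")
    demo.setLevel(VERBOSE)
    demo.log(TALKATIVE, "dropped: TALKATIVE (114) is below the VERBOSE (115) threshold")
    demo.log(SILENT, "emitted: SILENT (120) is at or above the VERBOSE threshold")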
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args)
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
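# --- Illustrative sketch (not part of the original test suite) ---
# HandlerTest above relies on WatchedFileHandler reopening its file when the
# device/inode pair changes (e.g. after log rotation removes the file). This
# hypothetical helper shows that behaviour in isolation; it assumes a POSIX
# platform and is not used by the tests.
def _sketch_watched_file_handler():
    fd, path = tempfile.mkstemp('.log', 'sketch-watched-')
    os.close(fd)
    handler = logging.handlers.WatchedFileHandler(path)
    record = logging.makeLogRecord({'msg': 'first write'})
    handler.handle(record)
    os.unlink(path)                  # simulate external rotation/removal
    handler.handle(record)           # the handler notices and recreates the file
    recreated = os.path.exists(path)
    handler.close()
    os.unlink(path)
    return recreated                 # expected: True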
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
if threading:
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
try:
asyncore.loop(poll_interval, map=self._map)
except OSError:
# On FreeBSD 8, closing the server repeatably
# raises this error. We swallow it if the
# server has been closed.
if self.connected or self.accepting:
raise
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
self._thread.join(timeout)
self._thread = None
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
self._thread.join(timeout)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:param bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:param bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
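# --- Illustrative sketch (not part of the original test suite) ---
# ControlMixin (in the server_helper section above) runs a socketserver class
# on a daemon thread and hands every request to a plain callable. This hedged
# sketch drives TestTCPServer with such a callable; it assumes the threading
# import above succeeded, simplifies timing, and is not used by the tests.
def _sketch_control_mixin_usage():
    received = []
    done = threading.Event()
    def on_request(request):
        # Called on the server side with the raw request handler object.
        received.append(request.connection.recv(1024))
        done.set()
    server = TestTCPServer(('localhost', 0), on_request, poll_interval=0.01)
    server.start()
    server.ready.wait()
    try:
        with socket.create_connection(('localhost', server.port)) as conn:
            conn.sendall(b'ping')
        done.wait(2.0)
    finally:
        server.stop(2.0)
    return received                  # expected: [b'ping']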
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPHandlerTest(BaseTest):
TIMEOUT = 8.0
def test_basic(self):
sockmap = {}
server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (support.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT) # 14314: don't wait forever
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
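# --- Illustrative sketch (not part of the original test suite) ---
# MemoryHandlerTest above checks that MemoryHandler buffers records and flushes
# them to its target when either the buffer reaches capacity or a record at or
# above flushLevel arrives. A minimal, hypothetical sketch of the same idea:
def _sketch_memory_handler():
    sink = io.StringIO()
    target = logging.StreamHandler(sink)
    buffering = logging.handlers.MemoryHandler(capacity=10,
                                               flushLevel=logging.WARNING,
                                               target=target)
    demo = logging.getLogger("sketch.memory")
    demo.propagate = False
    demo.setLevel(logging.DEBUG)
    demo.addHandler(buffering)
    demo.debug("buffered, not yet written to the target")
    demo.warning("WARNING reaches flushLevel, so both records are flushed")
    demo.removeHandler(buffering)
    buffering.close()
    return sink.getvalue()           # contains both messages, in order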
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
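# --- Illustrative sketch (not part of the original test suite) ---
# ConfigFileTest above feeds ini-style text to logging.config.fileConfig via a
# StringIO. The same call works outside the test harness; this hedged sketch
# reuses the class's config0 string and keeps previously created loggers
# enabled with disable_existing_loggers=False.
def _sketch_file_config():
    ini_text = textwrap.dedent(ConfigFileTest.config0)
    logging.config.fileConfig(io.StringIO(ini_text),
                              disable_existing_loggers=False)
    # Per config0, the root logger now emits WARNING and above to sys.stdout.
    logging.getLogger().warning("configured from an ini-style string")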
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
if threading:
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.server:
self.server.stop(2.0)
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop(2.0)
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
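# --- Illustrative sketch (not part of the original test suite) ---
# SocketHandlerTest.handle_socket above decodes SocketHandler's wire format:
# a 4-byte big-endian length prefix followed by a pickled dict of the record's
# attributes. This hypothetical helper shows both sides of that framing
# without any network traffic.
def _sketch_socket_handler_frame():
    handler = logging.handlers.SocketHandler('localhost', 0)  # never connects here
    record = logging.makeLogRecord({'msg': 'framed message'})
    frame = handler.makePickle(record)        # length prefix + pickled attribute dict
    handler.close()
    (payload_len,) = struct.unpack('>L', frame[:4])
    attributes = pickle.loads(frame[4:4 + payload_len])
    return logging.makeLogRecord(attributes).getMessage()     # 'framed message'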
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop(2.0)
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls(('localhost', server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop(2.0)
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
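# --- Illustrative sketch (not part of the original test suite) ---
# The b'<11>' prefix asserted above is the syslog priority, computed as
# facility * 8 + severity: the default facility LOG_USER (1) combined with the
# LOG_ERR severity (3) that an ERROR record maps to gives 1 * 8 + 3 = 11.
# SysLogHandler exposes the computation as encodePriority(); a hedged check:
def _sketch_syslog_priority():
    h = logging.handlers.SysLogHandler(address=('localhost', 0))
    try:
        priority = h.encodePriority(h.LOG_USER, h.mapPriority('ERROR'))
    finally:
        h.close()
    return priority                  # expected: 11, matching b'<11>' above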
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
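# --- Illustrative sketch (not part of the original test suite) ---
# HTTPHandlerTest above recovers the record from a query string (GET) or an
# urlencoded body (POST). The handler builds that payload from mapLogRecord(),
# which defaults to the record's attribute dict. A hedged, server-free sketch
# of the encoding round trip:
def _sketch_http_handler_payload():
    from urllib.parse import urlencode, parse_qs as _parse_qs
    handler = logging.handlers.HTTPHandler('localhost:80', '/frob', method='GET')
    record = logging.makeLogRecord({'name': 'http', 'msg': 'sp\xe4m'})
    payload = urlencode(handler.mapLogRecord(record))
    handler.close()
    parsed = _parse_qs(payload)
    return parsed['name'], parsed['msg']      # (['http'], ['sp\xe4m'])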
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no loggers are set
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
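# --- Illustrative sketch (not part of the original test suite) ---
# WarningsTest above depends on logging.captureWarnings(True) rerouting
# warnings.warn() calls to the 'py.warnings' logger as WARNING records instead
# of the normal warnings output. A hedged, self-contained sketch:
def _sketch_capture_warnings():
    sink = io.StringIO()
    handler = logging.StreamHandler(sink)
    py_warnings = logging.getLogger("py.warnings")
    py_warnings.addHandler(handler)
    try:
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            warnings.simplefilter("always")
            warnings.warn("rerouted through the logging system")
    finally:
        logging.captureWarnings(False)
        py_warnings.removeHandler(handler)
    return sink.getvalue()           # contains the UserWarning message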
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config7 does not define compiler.parser but defines compiler.lexer,
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
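# Illustrative note (not part of the upstream tests): 'cfg://' values are
# resolved against the configuration dictionary itself, e.g.
#   'cfg://true_formatters'          -> the dict stored under 'true_formatters'
#   'cfg://handler_configs[hand1]'   -> config['handler_configs']['hand1']
# test_baseconfig below exercises these conversion rules directly.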
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
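# Illustrative note (not part of the upstream tests): entries under the special
# '.' key are applied as attributes on the constructed handler, so config14 is
# roughly equivalent to:
#   h = logging.StreamHandler(stream=sys.stdout)
#   h.foo = 'bar'
#   h.terminator = '!\n'
# out_of_order (below) configures a handler whose 'target' names another
# handler that has not been set up yet, which dictConfig has to resolve.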
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
#Same as test_config_7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
#Nothing will be output since the handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.apply_config(self.out_of_order)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = support.TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = support.TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@support.requires_multiprocessing_queue
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
@support.requires_multiprocessing_queue
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
for i in range(self.repeat):
queue = multiprocessing.Queue()
self.setup_and_log(queue, '%s_%s' %(self.id(), i))
# time.sleep(1)
items = list(self.get_all_from_queue(queue))
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
@support.requires_type_collecting
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
if threading:
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.level = self.original_logging_level
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxbytes is zero rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
@support.requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
#print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
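# The loop above attaches one generated test per rollover interval, e.g.
# TimedRotatingFileHandlerTest.test_compute_rollover_S, ..._MIDNIGHT, ..._W0.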
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
blacklist = {'logThreads', 'logMultiprocessing',
'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger',
'root', 'threading'}
support.check__all__(self, logging, blacklist=blacklist)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase
]
if hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
utils.py
|
import json
import time
import unittest
from threading import Thread
from http.server import SimpleHTTPRequestHandler, HTTPServer
import bugsnag
class MissingRequestError(Exception):
pass
class IntegrationTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.server = FakeBugsnagServer()
def setUp(self):
self.server.received = []
def tearDown(self):
bugsnag.legacy.default_client.uninstall_sys_hook()
client = bugsnag.Client()
client.configuration.api_key = 'some key'
bugsnag.legacy.default_client = client
bugsnag.legacy.configuration = client.configuration
@classmethod
def tearDownClass(cls):
cls.server.shutdown()
def assertSentReportCount(self, count):
self.assertEqual(len(self.server.received), count)
def assertExceptionName(self, received_index, event_index, name):
json_body = self.server.received[received_index]['json_body']
event = json_body['events'][event_index]
exception = event['exceptions'][0]
self.assertEqual(exception['errorClass'], name)
@property
def sent_report_count(self) -> int:
return len(self.server.received)
class FakeBugsnagServer(object):
"""
A minimal HTTP server which records the JSON payload and other request
information for every report it receives
"""
def __init__(self):
self.received = []
self.paused = False
class Handler(SimpleHTTPRequestHandler):
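# 'handler' (not 'self') is deliberately used as the first parameter so that
# 'self' inside do_POST keeps referring to the enclosing FakeBugsnagServer.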
def do_POST(handler):
start = time.time()
while self.paused:
if time.time() > (start + 0.5):
raise Exception('Paused HTTP server timeout')
time.sleep(0.001)
length = int(handler.headers['Content-Length'])
raw_body = handler.rfile.read(length).decode('utf-8')
if handler.path != '/ignore':
self.received.append({'headers': handler.headers,
'json_body': json.loads(raw_body),
'path': handler.path,
'method': handler.command})
handler.send_response(200)
handler.end_headers()
return ()
def log_request(self, *args):
pass
self.server = HTTPServer(('localhost', 0), Handler)
self.server.timeout = 0.5
self.thread = Thread(target=self.server.serve_forever, args=(0.1,))
self.thread.daemon = True
self.thread.start()
@property
def address(self):
return '{0}:{1}'.format(*self.server.server_address)
@property
def url(self):
return 'http://%s' % self.address
def shutdown(self):
self.server.shutdown()
self.thread.join()
self.server.server_close()
def wait_for_request(self, timeout=2):
start = time.time()
while (len(self.received) == 0):
if (time.time() - start > timeout):
raise MissingRequestError("No request received before timeout")
time.sleep(0.25)
class ScaryException(Exception):
pass
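# A minimal usage sketch (not collected by the test runner): it shows how a test built
# on these helpers might point the bugsnag client at the fake server and wait for a
# report. The configure()/notify() calls and the 'endpoint' option are assumptions
# about the bugsnag client API rather than something these helpers require.
def _example_fake_server_usage():
    server = FakeBugsnagServer()
    try:
        # Send reports to the fake server instead of the real Bugsnag endpoint.
        bugsnag.configure(api_key='some key', endpoint=server.url)
        bugsnag.notify(ScaryException('something broke'))
        server.wait_for_request()
        assert len(server.received) == 1
    finally:
        server.shutdown()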
|
hpds-02-B-01.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 12:59:23 2020
Illustration of the GIL's effect (multi-threaded)
@author: Taufik Sutanto
"""
import time
from threading import Thread
COUNT = 50000000
def countdown(n):
while n>0:
n -= 1
t1 = Thread(target=countdown, args=(COUNT//2,))
t2 = Thread(target=countdown, args=(COUNT//2,))
start = time.time()
t1.start()
t2.start()
t1.join()
t2.join()
end = time.time()
print('Time taken in seconds -', end - start)
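# For comparison, a minimal sketch of the same countdown split across two processes
# instead of threads, which sidesteps the GIL for CPU-bound work. It is defined but
# not called here, so the thread-based timing demo above is unchanged; if you call it,
# do so under an `if __name__ == '__main__':` guard (required on Windows/macOS).
def countdown_with_processes():
    from multiprocessing import Process
    p1 = Process(target=countdown, args=(COUNT//2,))
    p2 = Process(target=countdown, args=(COUNT//2,))
    t0 = time.time()
    p1.start(); p2.start()
    p1.join(); p2.join()
    print('Time taken in seconds (processes) -', time.time() - t0)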
|
smartcam.py
|
#!/usr/bin/env python
import numpy
import multiprocessing
import os,time
import conf.default as conf
import lib.core as core
import lib.service as service
try:
    import Queue  # Python 2
except ImportError:
    import queue as Queue  # Python 3
import logging
def main():
logging.basicConfig(level=service.get_loglevel(os.environ['LOG_LEVEL']))
coreobj = core.Core()
mgr = multiprocessing.Manager()
namespace = mgr.Namespace()
configuration = {}
configuration['frame_gap_sec_max'] = 3.0
configuration['ENC_QUEUE_SIZE'] = 10
configuration['MAX_QUEUE_SIZE'] = 5
configuration['AFD_FRAME_BUF_SIZE'] = 8
configuration['MATCHER_BUF_SIZE'] = 20
configuration['CAM_WIDTH'] = 1280
configuration['CAM_HEIGHT'] = 720
    configuration['frame_shrink_rate'] = int(os.environ['FRAME_SHRINK_RATE']) # 1, 2, 4, or 8; a larger number gives faster detection but faces need to be closer to the camera
    configuration['frame_shrink_rate_preprocess'] = int(os.environ['FRAME_SHRINK_RATE_PREPROC']) # 1, 2, 4, or 8; a larger number gives faster detection but faces need to be closer to the camera
    configuration['consecutive_frames_tocheck'] = 2 # a face must be recognized in this many consecutive frames before the event is triggered
configuration['event_supress_ms'] = 60000 # same face will not trigger the event again in this time frame
configuration['show_frames'] = (os.environ['SHOW_FRAMES'] == 'YES') #True
configuration['show_detection_box'] = (os.environ['SHOW_FRAMES'] == 'YES') #True
configuration['CAM_DEV_ID'] = int(os.environ['CAM_DEV_ID']) #True
if os.environ['MODE'] == 'LOGGING':
logging.info(('SYSTEM is in mode: ',os.environ['MODE']))
configuration['t_min'] = 0.25
configuration['t_max'] = 0.52
configuration['t_default'] = 0.35
configuration['t_adjust_step'] = 0.03
elif os.environ['MODE'] == 'SURVEILANCE':
logging.info(('SYSTEM is in mode: ',os.environ['MODE']))
configuration['t_min'] = 0.3
configuration['t_max'] = 0.57
configuration['t_default'] = 0.5
configuration['t_adjust_step'] = 0.03
else:
logging.info(('SYSTEM is in mode: ',os.environ['MODE']))
        configuration['t_min'] = 0.3 # faces are reported as Unknown once the tolerance drops to this minimum, since the same face matching multiple names would be an error
configuration['t_max'] = 0.54 # face will be Unknown when tolerance increases to this max value
configuration['t_default'] = 0.4 # tolerance to start with
configuration['t_adjust_step'] = 0.04 # tolerance adjustment step
namespace.conf = configuration
q_enc = multiprocessing.Queue(configuration['ENC_QUEUE_SIZE'])
q_encoded = multiprocessing.Queue(configuration['MAX_QUEUE_SIZE'])
q_matched = multiprocessing.Queue(configuration['MAX_QUEUE_SIZE'])
namespace.loadfaces = True
namespace.faces_loaded = {'known_face_names':[],'known_face_encodings':[]}
namespace.frames_buffer = []
namespace.face_matched = {'id':0,'frame':numpy.ndarray((configuration['CAM_WIDTH'], configuration['CAM_HEIGHT'], 3)),'face_locations':[],'names':[]}
# namespace.faces_detected = {'id':0,'frame':numpy.ndarray((configuration['CAM_WIDTH'], configuration['CAM_HEIGHT'], 3)),'face_locations':[],'face_encodings':[]}
# namespace.match_inprocess = []
# namespace.event_inprocess = []
namespace.frame_have_face = 0.0
namespace.contourArea = 9999999
namespace.laplacianSwitch = 0.0
fe = multiprocessing.Process(target=coreobj.face_encoder, args=(namespace,q_enc,q_encoded))
fm = multiprocessing.Process(target=coreobj.face_matcher, args=(namespace,q_encoded,q_matched))
et = multiprocessing.Process(target=coreobj.event_trigger, args=(namespace,q_matched))
fl = multiprocessing.Process(target=coreobj.face_loader, args=(namespace,))
# slas is a list of SLA for afd process to achieve
slas = [0.5,5.5,0.6,6.5,0.7,7.5]
worst_sla = max(slas)
afd_queue_length_seconds = min((worst_sla * 2), 5)
frame_queues = []
afds = [None] * len(slas)
for qi,sla in enumerate(slas):
q_len = min(max(2,int(afd_queue_length_seconds/sla)),10)
        logging.debug(('preparing afd process: ', qi, sla, q_len))
queue_fr = multiprocessing.Queue(q_len)
frame_queues.append(queue_fr)
afds[qi] = multiprocessing.Process(target=coreobj.auto_face_detector, args=(namespace,sla, 8.8, frame_queues[qi], q_enc))
fqi = multiprocessing.Process(target=coreobj.frame_queue_input, args=(namespace,frame_queues))
try:
for afd in afds:
afd.start()
fqi.start()
fl.start()
fe.start()
fm.start()
et.start()
for afd in afds:
afd.join()
fqi.join()
fl.join()
fe.join()
fm.join()
et.join()
except KeyboardInterrupt:
logging.info("Caught KeyboardInterrupt, terminating processes")
for afd in afds:
afd.terminate()
fqi.terminate()
fl.terminate()
fe.terminate()
fm.terminate()
et.terminate()
for afd in afds:
afd.join()
fqi.join()
fl.join()
fe.join()
fm.join()
et.join()
logging.info("All processes stopped.....")
if __name__ == "__main__":
main()
|
consumer_queue.py
|
from multiprocessing import Process, Queue  # multiprocessing components: process and queue
import time, random
# producer
def producer(name, food, q):
    for i in range(4):
        time.sleep(random.randint(1, 3))  # simulate the time it takes to fetch data
        f = '%s produced %s %s' % (name, food, i)
        print(f)
        q.put(f)  # add it to the queue
# consumer
def consumer(q, name):
    while True:
        food = q.get()  # blocks when nothing is available, so the child process never exits on its own
        # end the while loop when the item taken from the queue is None
        if food is None:
            print('%s got a None' % name)
            break
        f = '\033[31m%s consumed %s\033[0m' % (name, food)
        print(f)
        time.sleep(random.randint(1, 3))  # simulate the time it takes to consume data
if __name__ == '__main__':
    q = Queue()  # create the queue
    # simulate producers generating data
    p = Process(target=producer, args=('p', 'baozi', q))  # create the process
    p.start()  # start the process
    p1 = Process(target=producer, args=('p1', 'shaobing', q))
    p1.start()
    # simulate consumers consuming the data
    c = Process(target=consumer, args=(q, 'c'))
    c.start()
    c1 = Process(target=consumer, args=(q, 'c1'))
    c1.start()
    p.join()  # block the main process; q.put(None) only runs after p and p1 have finished
    p1.join()  # block the main process; q.put(None) only runs after p and p1 have finished
    # Put a None at the end once the producers have generated all their data,
    # so the while loop in each consumer child process can terminate;
    # otherwise the consumers would wait forever for new items on the queue.
    q.put(None)
    q.put(None)
"""
使用Queue组件实现的缺点就是,实现了多少个消费者consumer进程,就需要在最后往队列中添加多少个None标识,方便生产完毕结束消费者
consumer进程。否则,p.get() 不到任务会阻塞子进程,因为while循环,直到队列q中有新的任务加进来,才会再次执行。而我们的生产者只
能生产这么多东西,所以相当于程序卡死
作者:丘山Ivan
链接:https://www.jianshu.com/p/2d3e6a21f6fe
来源:简书
著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。
"""
|
device_scheduler.py
|
import time
from random import randint
from copy import deepcopy
from threading import Thread, Condition, Lock
from flask import Flask
from flask_mqtt import Mqtt
from app.models import Device, User
from app.models.db import db
from app.mqtt.mqtt_message import MqttMesssage, SetEnergyLevelSignal, ShutdownSignal, StartupSignal
# TODO remove after testing
SECONDS_PER_HOUR = 10
class ScheduleState:
"""Class for representing scheduler internal state"""
PUBLISH = "publish"
EXEC_SYNC = "execs"
EXEC_BACKGROUND = "execb"
def __init__(self):
self.queue = []
"""FIFO queue that accumulates requests from every module of this app\n
it is usually populated by ScheduleHandlers or, for example, anomaly detection routines"""
self.info = {}
"""Dictionary to represent current state"""
self.info_lock = Lock()
self.wakeup = Condition()
"""Variable that, when notified, wakes up the scheduler"""
def notify(self):
"""Notify wakeup condition"""
self.wakeup.acquire(True)
self.wakeup.notify()
self.wakeup.release()
def assign_publish(self, channel, content):
if type(content) == str:
content = content.encode()
self.queue.append((ScheduleState.PUBLISH, channel, content))
def assign_exec_sync(self, to_call, kwargs):
if type(to_call) != str:
to_call = to_call.__name__
self.queue.append((ScheduleState.EXEC_SYNC, to_call, kwargs))
def assign_exec_background(self, to_call, kwargs):
if type(to_call) != str:
to_call = to_call.__name__
self.queue.append((ScheduleState.EXEC_BACKGROUND, to_call, kwargs))
class ScheduleHandlers:
"""Collection of all (default) schedule-related handlers\n
Rules for implementing a handler:\n
1. every handler must receive as first argument the current state\n
2. for multithreading safety, lock before using info from (the) state object\n
"""
def global_shutdown(current_state: ScheduleState):
"""Global shutdown broadcast"""
current_state.info_lock.acquire()
global_channel = current_state.info["global_channel"]
current_state.info_lock.release()
current_state.assign_publish(global_channel, ShutdownSignal().pack())
current_state.notify()
def global_startup(current_state: ScheduleState):
"""Global startup broadcast"""
current_state.info_lock.acquire()
global_channel = current_state.info["global_channel"]
current_state.info_lock.release()
current_state.assign_publish(global_channel, StartupSignal().pack())
current_state.notify()
def schedule_tracker(current_state: ScheduleState, device_uuid):
"""Enforces schedule for a specific device"""
current_state.info_lock.acquire()
channel = current_state.info[device_uuid]["channel"]
intervals = deepcopy(current_state.info[device_uuid]["schedule"])
current_state.info_lock.release()
        intervals.sort(key=lambda i: (i[0], i[1]))  # sort numerically by (start_hour, end_hour)
idx = 0
hour = int(time.strftime("%H"), 10)
delta = 0
if hour > intervals[-1][0]:
delta += 24 - hour
hour = 0
while intervals[idx][0] < hour:
idx += 1
delta += intervals[idx][0] - hour
time.sleep(delta * SECONDS_PER_HOUR)
while True:
current_state.assign_publish(channel, StartupSignal().pack())
time.sleep((intervals[idx][1] - intervals[idx][0]) * SECONDS_PER_HOUR)
current_state.notify()
current_state.assign_publish(channel, ShutdownSignal().pack())
lo = intervals[idx][1]
idx = (idx + 1) % len(intervals)
hi = intervals[idx][0]
delta = hi - lo
if hi < lo:
delta += 24
time.sleep(delta * SECONDS_PER_HOUR)
def power_schedule_tracker(current_state: ScheduleState, device_uuid):
"""Enforces ACPI schedule for a specific device"""
current_state.info_lock.acquire()
channel = current_state.info[device_uuid]["channel"]
intervals = deepcopy(current_state.info[device_uuid]["power_schedule"])
current_state.info_lock.release()
        intervals.sort(key=lambda i: (i[0], i[1]))  # sort numerically by (start_hour, end_hour)
idx = 0
hour = int(time.strftime("%H"), 10)
delta = 0
if hour > intervals[-1][0]:
delta += 24 - hour
hour = 0
while intervals[idx][0] < hour:
idx += 1
delta += intervals[idx][0] - hour
time.sleep(delta * SECONDS_PER_HOUR)
while True:
current_state.assign_publish(channel, SetEnergyLevelSignal(intervals[idx][3]).pack())
time.sleep((intervals[idx][1] - intervals[idx][0]) * SECONDS_PER_HOUR)
current_state.notify()
current_state.assign_publish(channel, SetEnergyLevelSignal(intervals[idx][2]).pack())
lo = intervals[idx][1]
idx = (idx + 1) % len(intervals)
hi = intervals[idx][0]
delta = hi - lo
if hi < lo:
delta += 24
time.sleep(delta * SECONDS_PER_HOUR)
def alarm(current_state: ScheduleState,
seconds, repeats, device_uuid,
condition="always_true",
content_generator="default_content"):
"""General-purpose alarm"""
current_state.info_lock.acquire()
channel = current_state.info[device_uuid]["channel"]
current_state.info_lock.release()
        # repeats == -1 means repeat forever; otherwise run exactly `repeats` times
        run_forever = repeats == -1
        rep = 0
        while run_forever or rep < repeats:
            time.sleep(seconds)
            if ScheduleHandlers.call[condition](current_state) is True:
                current_state.assign_publish(channel, ScheduleHandlers.call[content_generator](current_state))
                current_state.notify()
            if not run_forever:
                rep += 1
call = {
"alarm": alarm,
"schedule_tracker": schedule_tracker,
"power_schedule_tracker": power_schedule_tracker,
"global_shutdown": global_shutdown,
"global_startup": global_startup,
"always_true": lambda _: True,
"default_content": lambda _: MqttMesssage(payload = f"ping {randint(0, 10000)}", sender = "sched").pack(),
"ping_alive": lambda _: MqttMesssage(payload = f"ping {randint(0, 10000)}", sender = "sched").pack()
}
"""Function dispatcher"""
class DeviceScheduler:
"""The mqtt client associated with the flask webserver\n
It manages the current state of the devices and their scheduling"""
def parse_device_settings(self, device, state: ScheduleState):
"""Update a state object based on given device settings"""
settings = device.settings
if "handlers" in settings.keys():
for fct, kwargs in settings["handlers"].items():
state.assign_exec_background(fct, kwargs)
state.info[device.uuid] = {}
if "channel" in settings.keys():
state.info[device.uuid]["channel"] = settings["channel"]
if "always_on" in settings.keys():
state.info[device.uuid]["always_on"] = settings["always_on"]
if "schedule" in settings.keys():
state.info[device.uuid]["schedule"] = settings["schedule"]
if "power_schedule" in settings.keys():
state.info[device.uuid]["power_schedule"] = settings["power_schedule"]
def scheduler_loop(self, state: ScheduleState):
"""Main scheduler infinite loop"""
while True:
# NOT busy waiting
while len(state.queue) == 0:
state.wakeup.acquire(True) # only because the wait() call needs the current thread to have the lock
state.wakeup.wait()
while len(state.queue) > 0:
r = state.queue.pop(0)
if r[0] == ScheduleState.PUBLISH:
self.mqtt.publish(r[1], r[2])
elif r[0] == ScheduleState.EXEC_SYNC:
ScheduleHandlers.call[r[1]](current_state=state, **r[2])
elif r[0] == ScheduleState.EXEC_BACKGROUND:
thr = Thread(target=ScheduleHandlers.call[r[1]], daemon=True, args=(state,), kwargs=r[2])
thr.start()
def start_scheduler(self):
"""Parses each device settings for each user, calls required handlers\n
and then starts the (infinite) publisher loop, for each different user"""
with self.app.app_context():
for user in db.session.query(User).all():
initial_state = ScheduleState()
self.per_user_scheds[user] = Thread(target=self.scheduler_loop, daemon=True, args=(initial_state,))
# NOTE: since the state object is currently referenced only here
# the locks are not (yet) used
initial_state.assign_exec_sync("global_startup", {})
for device in db.session.query(Device).filter_by(user_id=user.id):
self.parse_device_settings(device, initial_state)
initial_state.info["global_channel"] = f"{self.config['global_channel_prefix']}_{user.id}"
self.per_user_scheds[user].start()
def __init__(self, app: Flask, config):
self.config = config
try:
self.app = app
self.mqtt = Mqtt(app)
self.per_user_scheds = {}
self.global_sched_thr = Thread(target=self.start_scheduler, daemon=True)
self.global_sched_thr.start()
except Exception as err:
raise Exception(f"error while executing scheduler-related code: {err}")
|
housekeeper.py
|
#SPDX-License-Identifier: MIT
"""
Keeps data up to date
"""
import coloredlogs
from copy import deepcopy
import logging, os, time, requests
import logging.config
from multiprocessing import Process, get_start_method
from sqlalchemy.ext.automap import automap_base
import sqlalchemy as s
import pandas as pd
from sqlalchemy import MetaData
from augur.logging import AugurLogging
from urllib.parse import urlparse
import warnings
warnings.filterwarnings('ignore')
logger = logging.getLogger(__name__)
class Housekeeper:
def __init__(self, broker, augur_app):
logger.info("Booting housekeeper")
self._processes = []
self.augur_logging = augur_app.logging
self.jobs = deepcopy(augur_app.config.get_value("Housekeeper", "jobs"))
self.update_redirects = deepcopy(augur_app.config.get_value("Housekeeper", "update_redirects"))
self.broker_host = augur_app.config.get_value("Server", "host")
self.broker_port = augur_app.config.get_value("Server", "port")
self.broker = broker
self.db = augur_app.database
self.helper_db = augur_app.operations_database
helper_metadata = MetaData()
helper_metadata.reflect(self.helper_db, only=['worker_job'])
HelperBase = automap_base(metadata=helper_metadata)
HelperBase.prepare()
self.job_table = HelperBase.classes.worker_job.__table__
repoUrlSQL = s.sql.text("""
SELECT repo_git FROM repo
""")
rs = pd.read_sql(repoUrlSQL, self.db, params={})
all_repos = rs['repo_git'].values.tolist()
# If enabled, updates all redirects of repositories
# and organizations urls for configured repo_group_id
self.update_url_redirects()
# List of tasks that need periodic updates
self.schedule_updates()
def schedule_updates(self):
"""
Starts update processes
"""
self.prep_jobs()
self.augur_logging.initialize_housekeeper_logging_listener()
logger.info("Scheduling update processes")
for job in self.jobs:
process = Process(target=self.updater_process, name=job["model"], args=(self.broker_host, self.broker_port, self.broker, job, (self.augur_logging.housekeeper_job_config, self.augur_logging.get_config())))
self._processes.append(process)
process.start()
@staticmethod
def updater_process(broker_host, broker_port, broker, job, logging_config):
"""
Controls a given plugin's update process
"""
logging.config.dictConfig(logging_config[0])
logger = logging.getLogger(f"augur.jobs.{job['model']}")
coloredlogs.install(level=logging_config[1]["log_level"], logger=logger, fmt=logging_config[1]["format_string"])
if logging_config[1]["quiet"]:
            logger.disabled = True
if 'repo_group_id' in job:
repo_group_id = job['repo_group_id']
logger.info('Housekeeper spawned {} model updater process for repo group id {}'.format(job['model'], repo_group_id))
else:
repo_group_id = None
logger.info('Housekeeper spawned {} model updater process for repo ids {}'.format(job['model'], job['repo_ids']))
try:
compatible_worker_found = False
# Waiting for compatible worker
while True:
if not compatible_worker_found:
for worker in list(broker._getvalue().keys()):
if job['model'] in broker[worker]['models'] and job['given'] in broker[worker]['given']:
compatible_worker_found = True
time.sleep(3)
continue
logger.info("Housekeeper recognized that the broker has a worker that " +
"can handle the {} model... beginning to distribute maintained tasks".format(job['model']))
while True:
logger.info('Housekeeper updating {} model with given {}...'.format(
job['model'], job['given'][0]))
if job['given'][0] == 'git_url' or job['given'][0] == 'github_url':
for repo in job['repos']:
if job['given'][0] == 'github_url' and 'github.com' not in repo['repo_git']:
continue
given_key = 'git_url' if job['given'][0] == 'git_url' else 'github_url'
task = {
"job_type": job['job_type'] if 'job_type' in job else 'MAINTAIN',
"models": [job['model']],
"display_name": "{} model for url: {}".format(job['model'], repo['repo_git']),
"given": {}
}
task['given'][given_key] = repo['repo_git']
if "focused_task" in repo:
task["focused_task"] = repo['focused_task']
try:
requests.post('http://{}:{}/api/unstable/task'.format(
broker_host,broker_port), json=task, timeout=10)
except Exception as e:
logger.error("Error encountered: {}".format(e))
logger.debug(task)
time.sleep(15)
elif job['given'][0] == 'repo_group':
task = {
"job_type": job['job_type'] if 'job_type' in job else 'MAINTAIN',
"models": [job['model']],
"display_name": "{} model for repo group id: {}".format(job['model'], repo_group_id),
"given": {
"repo_group": job['repos']
}
}
try:
requests.post('http://{}:{}/api/unstable/task'.format(
broker_host,broker_port), json=task, timeout=10)
except Exception as e:
logger.error("Error encountered: {}".format(e))
logger.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)".format(len(job['repos'])))
time.sleep(job['delay'])
except KeyboardInterrupt as e:
pass
def join_updates(self):
"""
Join to the update processes
"""
for process in self._processes:
logger.debug(f"Joining {process.name} update process")
process.join()
def shutdown_updates(self):
"""
Ends all running update processes
"""
for process in self._processes:
# logger.debug(f"Terminating {process.name} update process")
process.terminate()
def prep_jobs(self):
logger.info("Preparing housekeeper jobs")
for job in self.jobs:
if 'repo_group_id' in job or 'repo_ids' in job:
# If RG id is 0 then it just means to query all repos
where_and = 'AND' if job['model'] == 'issues' and 'repo_group_id' in job else 'WHERE'
where_condition = '{} repo_group_id = {}'.format(where_and, job['repo_group_id']
) if 'repo_group_id' in job and job['repo_group_id'] != 0 else '{} repo.repo_id IN ({})'.format(
where_and, ",".join(str(id) for id in job['repo_ids'])) if 'repo_ids' in job else ''
repo_url_sql = s.sql.text("""
SELECT repo.repo_id, repo.repo_git, pull_request_count, collected_pr_count,
(repo_info.pull_request_count - pr_count.collected_pr_count) AS pull_requests_missing
FROM augur_data.repo LEFT OUTER JOIN (
SELECT count(*) AS collected_pr_count, repo_id
FROM pull_requests GROUP BY repo_id ) pr_count
ON pr_count.repo_id = repo.repo_id LEFT OUTER JOIN (
SELECT repo_id, MAX ( data_collection_date ) AS last_collected
FROM augur_data.repo_info
GROUP BY repo_id) recent_info
ON recent_info.repo_id = pr_count.repo_id LEFT OUTER JOIN repo_info
ON recent_info.repo_id = repo_info.repo_id
AND repo_info.data_collection_date = recent_info.last_collected
{}
GROUP BY repo.repo_id, repo_info.pull_request_count, pr_count.collected_pr_count
ORDER BY pull_requests_missing DESC NULLS LAST
""".format(where_condition)) if job['model'] == 'pull_requests' else s.sql.text("""
SELECT
*
FROM
(
( SELECT repo_git, repo.repo_id, issues_enabled, COUNT ( * ) AS meta_count
FROM repo left outer join repo_info on repo.repo_id = repo_info.repo_id
--WHERE issues_enabled = 'true'
GROUP BY repo.repo_id, issues_enabled
ORDER BY repo.repo_id ) zz
LEFT OUTER JOIN (
SELECT repo.repo_id,
repo.repo_name,
b.issues_count,
d.repo_id AS issue_repo_id,
e.last_collected,
COUNT ( * ) AS issues_collected_count,
(
b.issues_count - COUNT ( * )) AS issues_missing,
ABS (
CAST (( COUNT ( * )) AS DOUBLE PRECISION ) / CAST ( b.issues_count + 1 AS DOUBLE PRECISION )) AS ratio_abs,
(
CAST (( COUNT ( * )) AS DOUBLE PRECISION ) / CAST ( b.issues_count + 1 AS DOUBLE PRECISION )) AS ratio_issues
FROM
augur_data.repo left outer join
augur_data.pull_requests d on d.repo_id = repo.repo_id left outer join
augur_data.repo_info b on d.repo_id = b.repo_id left outer join
( SELECT repo_id, MAX ( data_collection_date ) AS last_collected FROM augur_data.repo_info GROUP BY repo_id ORDER BY repo_id ) e
on e.repo_id = d.repo_id and b.data_collection_date = e.last_collected
WHERE d.pull_request_id IS NULL
{}
GROUP BY
repo.repo_id,
d.repo_id,
b.issues_count,
e.last_collected
ORDER BY ratio_abs
) yy ON zz.repo_id = yy.repo_id
) D
ORDER BY ratio_abs NULLS FIRST
""".format(where_condition)) if job['model'] == 'issues' and 'repo_group_id' in job else s.sql.text("""
SELECT repo_git, repo_id FROM repo {} ORDER BY repo_id ASC
""".format(where_condition)) if 'order' not in job else s.sql.text("""
SELECT repo_git, repo.repo_id, count(*) as commit_count
FROM augur_data.repo left outer join augur_data.commits
on repo.repo_id = commits.repo_id
{}
group by repo.repo_id ORDER BY commit_count {}
""".format(where_condition, job['order']))
reorganized_repos = pd.read_sql(repo_url_sql, self.db, params={})
if len(reorganized_repos) == 0:
logger.warning("Trying to send tasks for repo group, but the repo group does not contain any repos: {}".format(repo_url_sql))
job['repos'] = []
continue
if 'starting_repo_id' in job:
last_id = job['starting_repo_id']
else:
repoIdSQL = s.sql.text("""
SELECT since_id_str FROM worker_job
WHERE job_model = '{}'
""".format(job['model']))
job_df = pd.read_sql(repoIdSQL, self.helper_db, params={})
# If there is no job tuple found, insert one
if len(job_df) == 0:
job_tuple = {
'job_model': job['model'],
'oauth_id': 0
}
result = self.helper_db.execute(self.job_table.insert().values(job_tuple))
logger.debug("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple))
# If a last id is not recorded, start from beginning of repos
# (first id is not necessarily 0)
try:
last_id = int(job_df.iloc[0]['since_id_str'])
except:
last_id = 0
jobHistorySQL = s.sql.text("""
SELECT max(history_id) AS history_id, status FROM worker_history
GROUP BY status
LIMIT 1
""")
history_df = pd.read_sql(jobHistorySQL, self.helper_db, params={})
finishing_task = False
if len(history_df.index) != 0:
if history_df.iloc[0]['status'] == 'Stopped':
self.history_id = int(history_df.iloc[0]['history_id'])
finishing_task = True
# Rearrange repos so the one after the last one that
# was completed will be ran first (if prioritized ordering is not available/enabled)
if job['model'] not in ['issues', 'pull_requests']:
before_repos = reorganized_repos.loc[reorganized_repos['repo_id'].astype(int) < last_id]
after_repos = reorganized_repos.loc[reorganized_repos['repo_id'].astype(int) >= last_id]
reorganized_repos = after_repos.append(before_repos)
if 'all_focused' in job:
reorganized_repos['focused_task'] = job['all_focused']
reorganized_repos = reorganized_repos.to_dict('records')
if finishing_task:
reorganized_repos[0]['focused_task'] = 1
job['repos'] = reorganized_repos
elif 'repo_id' in job:
job['repo_group_id'] = None
repoUrlSQL = s.sql.text("""
SELECT repo_git, repo_id FROM repo WHERE repo_id = {}
""".format(job['repo_id']))
rs = pd.read_sql(repoUrlSQL, self.db, params={})
if 'all_focused' in job:
rs['focused_task'] = job['all_focused']
rs = rs.to_dict('records')
job['repos'] = rs
# time.sleep(120)
def update_url_redirects(self):
if 'switch' in self.update_redirects and self.update_redirects['switch'] == 1 and 'repo_group_id' in self.update_redirects:
repos_urls = self.get_repos_urls(self.update_redirects['repo_group_id'])
for url in repos_urls:
r = requests.get(url)
check_for_update = url != r.url
if check_for_update:
self.update_repo_url(url, r.url, self.update_redirects['repo_group_id'])
def get_repos_urls(self, repo_group_id):
repos_sql = s.sql.text("""
SELECT repo_git FROM repo
            WHERE repo_group_id = :repo_group_id
""")
repos = pd.read_sql(repos_sql, self.db, params={'repo_group_id': repo_group_id})
if len(repos) == 0:
logger.info("Did not find any repositories stored in augur_database for repo_group_id {}\n".format(repo_group_id))
return repos['repo_git']
def update_repo_url(self, old_url, new_url, repo_group_id):
old_repo_path = Housekeeper.parseRepoName(old_url)
old_repo_group_name = old_repo_path[0]
new_repo_path = Housekeeper.parseRepoName(new_url)
new_repo_group_name = new_repo_path[0]
if old_repo_group_name != new_repo_group_name:
# verifying the old repo group name is available in the database
old_rg_name_sql = s.sql.text("""
SELECT rg_name FROM repo_groups
                WHERE repo_group_id = :repo_group_id
""")
old_rg_name_from_DB = pd.read_sql(old_rg_name_sql, self.db, params={'repo_group_id': repo_group_id})
if len(old_rg_name_from_DB['rg_name']) > 0 and old_repo_group_name != old_rg_name_from_DB['rg_name'][0]:
logger.info("Incoming old repo group name doesn't match the DB record for repo_group_id {} . Incoming name: {} DB record: {} \n".format(repo_group_id, old_repo_group_name, old_rg_name_from_DB['rg_name'][0]))
# checking if the new repo group name already exists and
# inserting it in repo_groups if it doesn't
rg_name_check_sql = s.sql.text("""
SELECT rg_name, repo_group_id FROM repo_groups
WHERE rg_name = :new_repo_group_name
""")
rg_name_check = pd.read_sql(rg_name_check_sql, self.db, params={'new_repo_group_name': new_repo_group_name})
new_rg_name_already_exists = len(rg_name_check['rg_name']) > 0
if new_rg_name_already_exists:
new_repo_group_id = rg_name_check['repo_group_id'][0]
else:
insert_sql = s.sql.text("""
INSERT INTO repo_groups("rg_name", "rg_description", "rg_website", "rg_recache", "rg_last_modified", "rg_type", "tool_source", "tool_version", "data_source", "data_collection_date")
VALUES (:new_repo_group_name, '', '', 0, CURRENT_TIMESTAMP, 'Unknown', 'Loaded by user', '1.0', 'Git', CURRENT_TIMESTAMP) RETURNING repo_group_id;
""")
new_repo_group_id = self.db.execute(insert_sql, new_repo_group_name=new_repo_group_name).fetchone()[0]
logger.info("Inserted repo group {} with id {}\n".format(new_repo_group_name, new_repo_group_id))
update_sql = s.sql.text("""
UPDATE repo SET repo_git = :new_url, repo_path = NULL, repo_name = NULL, repo_status = 'New', repo_group_id = :new_repo_group_id
WHERE repo_git = :old_url
""")
self.db.execute(update_sql, new_url=new_url, new_repo_group_id=new_repo_group_id, old_url=old_url)
logger.info("Updated repo url from {} to {}\n".format(new_url, old_url))
else:
update_sql = s.sql.text("""
UPDATE repo SET repo_git = :new_url, repo_path = NULL, repo_name = NULL, repo_status = 'New'
WHERE repo_git = :old_url
""")
self.db.execute(update_sql, new_url=new_url, old_url=old_url)
logger.info("Updated repo url from {} to {}\n".format(new_url, old_url))
    @staticmethod
    def parseRepoName(repo_url):
path = urlparse(repo_url).path
parts = path.split('/')
return parts[1:]
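# A small standalone sketch of the redirect check that update_url_redirects performs for
# each stored repo URL: follow the URL and report the final location when it has moved.
# The function name is illustrative and is not referenced elsewhere in Augur.
def check_repo_redirect(repo_git):
    response = requests.get(repo_git)
    if response.url != repo_git:
        logger.info("Repo url {} redirects to {}".format(repo_git, response.url))
        return response.url
    return None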
|
variable_scope.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import enum # pylint: disable=g-bad-import-order
import functools
import sys
import threading
import traceback
import six
from six import iteritems
from six.moves import xrange, zip # pylint: disable=redefined-builtin
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
"get_local_variable", "variable_scope", "variable_op_scope",
"no_regularizer", "VariableSynchronization", "VariableAggregation"
]
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/resource_variables",
"Whether variable_scope.enable_resource_variables() is called.")
class _PartitionInfo(object):
"""Holds partition info used by initializer functions."""
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape of
the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, collections_lib.Sequence) or isinstance(
full_shape, six.string_types):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, collections_lib.Sequence) or isinstance(
var_offset, six.string_types):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for offset, shape in zip(var_offset, full_shape):
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, collections_lib.Sequence) or isinstance(
shape, six.string_types):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(shape),
self.full_shape,
len(self.full_shape)))
for i in xrange(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in xrange(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
class _ReuseMode(enum.Enum):
"""Mode for variable access within a variable scope."""
# Indicates that variables are to be fetched if they already exist or
# otherwise created.
AUTO_REUSE = 1
# TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of
# enum values.
# REUSE_FALSE = 2
# REUSE_TRUE = 3
# TODO(apassos) remove these forwarding symbols.
VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name
VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""
_DEFAULT_USE_RESOURCE = tf2.enabled()
@tf_export(v1=["enable_resource_variables"])
def enable_resource_variables():
"""Creates resource variables by default.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = True
_api_usage_gauge.get_cell().set(True)
@tf_export(v1=["resource_variables_enabled"])
def resource_variables_enabled():
"""Returns `True` if resource variables are enabled.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
return _DEFAULT_USE_RESOURCE
@deprecation.deprecated(
None, "non-resource variables are not supported in the long term")
@tf_export(v1=["disable_resource_variables"])
def disable_resource_variables():
"""Opts out of resource variables.
If your code needs tf.disable_resource_variables() to be called to work
properly please file a bug.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = False
_api_usage_gauge.get_cell().set(False)
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys and
the corresponding TensorFlow Variables as values.
"""
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
self._partitioned_vars = {} # A dict of the stored PartitionedVariables.
self._store_eager_variables = False
def get_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
variables. When eager execution is enabled this argument is always
forced to be False.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). `trainable`
defaults to `True`, unless `synchronization` is set to `ON_READ`, in
which case it defaults to `False`.
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be true.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. The signature
of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes: `def
custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed: `def
custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
        ```python
        def custom_getter(getter, name, *args, **kwargs):
          return getter(name + '_suffix', *args, **kwargs)
        ```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
RuntimeError: when eager execution is enabled and not called from an
EagerVariableStore.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError("Passed a custom_getter which is not callable: %s" %
custom_getter)
with ops.init_scope():
if context.executing_eagerly():
# Variable creation and initialization takes place in `init_scope`s;
# as such, if an `init_scope` lifts us into the eager context, then we
# need to use `ResourceVariable`s.
use_resource = True
# Note that it's fine to reuse eager variables whose initialization was
# lifted from a function-building graph into the eager context (that's why
# the following clause is not wrapped in an `init_scope`); lifted variables
# are tracked by the graph's `VariableStore`.
if context.executing_eagerly():
if not self._store_eager_variables and reuse:
raise RuntimeError(
"When eager execution is enabled variable reuse is only supported"
" when an EagerVariableStore is active. See the documentation on"
" EagerVariableStore for example usage.")
if self._store_eager_variables:
reuse = AUTO_REUSE
# If a *_ref type is passed in an error would be triggered further down the
# stack. We prevent this using base_dtype to get a non-ref version of the
# type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
except AttributeError:
# .base_dtype not existing means that we will try and use the raw dtype
# which was passed in - this might be a NumPy type which is valid.
pass
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter( # pylint: disable=missing-docstring
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
is_scalar = (
shape is not None and isinstance(shape, collections_lib.Sequence) and
not shape)
# Partitioned variable case
if partitioner is not None and not is_scalar:
if not callable(partitioner):
raise ValueError("Partitioner must be callable, but received: %s" %
partitioner)
with ops.name_scope(None):
return self._get_partitioned_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Special case for partitioned variable to allow reuse without having to
# specify partitioner.
if (reuse is True and partitioner is None
and name in self._partitioned_vars):
return self._get_partitioned_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=None,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
synchronization, aggregation, trainable = (
variables.validate_synchronization_aggregation_trainable(
synchronization, aggregation, trainable, name))
if custom_getter is not None:
# Handle backwards compatibility with getter arguments that were added
# to the API after users started writing custom getters.
custom_getter_kwargs = {
"getter": _true_getter,
"name": name,
"shape": shape,
"dtype": dtype,
"initializer": initializer,
"regularizer": regularizer,
"reuse": reuse,
"trainable": trainable,
"collections": collections,
"caching_device": caching_device,
"partitioner": partitioner,
"validate_shape": validate_shape,
"use_resource": use_resource,
"synchronization": synchronization,
"aggregation": aggregation,
}
# `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
# `lambda`.
if ("constraint" in function_utils.fn_args(custom_getter) or
function_utils.has_kwargs(custom_getter)):
custom_getter_kwargs["constraint"] = constraint
return custom_getter(**custom_getter_kwargs)
else:
return _true_getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
name,
partitioner,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: the name of the new or existing sharded variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
shape: shape of the new or existing sharded variable.
dtype: type of the new or existing sharded variable (defaults to
`DT_FLOAT`).
initializer: initializer for the sharded variable.
regularizer: a (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable which has well-defined semantics. Defaults
to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
when violating reuse during variable creation, or if an existing
sharded variable exists for the given name but with different sharding.
"""
initializing_from_value = initializer is not None and isinstance(
initializer, ops.Tensor)
if name in self._vars:
raise ValueError(
"A partitioner was provided, but an unpartitioned version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created without partitioning?" % name)
shape = tensor_shape.as_shape(shape)
if initializing_from_value:
shape = shape.merge_with(initializer.get_shape())
partitions = None
if not reuse or partitioner:
partitions = _call_partitioner(partitioner, shape, dtype)
if name in self._partitioned_vars:
if reuse is False:
raise ValueError(
"Partitioned variable with name %s already exists. Did you mean to "
"set reuse=True or reuse=tf.AUTO_REUSE in VarScope?" % name)
existing_var = self._partitioned_vars[name]
if not shape.is_compatible_with(existing_var.get_shape()):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified shape %s "
"and found shape %s." % (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified dtype %s "
"and found dtype %s." % (name, dtype.name, existing_var.dtype.name))
# pylint: disable=protected-access
if (partitions is not None and
existing_var._get_partitions() != partitions):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified partitions "
"%s and found partitions %s." %
(name, partitions, existing_var._get_partitions()))
# pylint: enable=protected-access
return existing_var
if reuse is True:
raise ValueError("PartitionedVariable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
slice_dim, num_slices = _get_slice_dim_and_num_slices(partitions)
if "%s/part_0" % name in self._vars:
if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but %s/part_%d was not." %
(num_slices, name, name, num_slices - 1))
if "%s/part_%d" % (name, num_slices) in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but so was the extra shard %s/part_%d." %
(num_slices, name, name, num_slices))
vs = []
for i, (var_offset, var_shape) in enumerate(
_iter_slices(shape.as_list(), num_slices, slice_dim)):
partition_info = _PartitionInfo(
full_shape=shape.as_list(), var_offset=var_offset)
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(var_full_name + "/PartitionedInitializer"):
# Create the tensor to initialize the variable with default value.
if initializer is None:
init, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
if initializing_from_value:
init_shape = None
else:
init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
elif isinstance(initializer, ops.Tensor):
init = array_ops.slice(initializer, var_offset, var_shape)
# Use the dtype of the given tensor.
dtype = init.dtype.base_dtype
init_shape = None
else:
init = ops.convert_to_tensor(initializer, dtype=dtype)
init = array_ops.slice(init, var_offset, var_shape)
init_shape = None
with ops.name_scope(None):
var = self._get_single_variable(
name=var_full_name,
shape=init_shape,
dtype=dtype,
initializer=init,
partition_info=partition_info,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: disable=protected-access
var._set_save_slice_info(
variables.Variable.SaveSliceInfo(name, shape.as_list(), var_offset,
var_shape))
vs.append(var)
# pylint: enable=protected-access
partitioned_var = variables.PartitionedVariable(
name=name,
shape=shape,
dtype=dtype,
variable_list=vs,
partitions=partitions)
if not context.executing_eagerly() or self._store_eager_variables:
self._partitioned_vars[name] = partitioned_var
return partitioned_var
def _get_single_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Get or create a single Variable (e.g.
a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
constraint: see get_variable.
synchronization: see get_variable.
aggregation: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if reuse is False:
var = self._vars[name]
err_msg = ("Variable %s already exists, disallowed."
" Did you mean to set reuse=True or "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# ResourceVariables don't have an op associated with them, so no traceback
if isinstance(var, resource_variable_ops.ResourceVariable):
raise ValueError(err_msg)
tb = var.op.traceback[::-1]
# Throw away internal tf entries and only take a few lines. In some
# cases the traceback can be longer (e.g. if someone uses factory
# functions to create variables) so we take more than needed in the
# default case.
tb = [x for x in tb if "tensorflow/python" not in x[0]][:5]
raise ValueError("%s Originally defined at:\n\n%s" %
(err_msg, "".join(traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." %
(name, shape, found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." %
(name, dtype_str, found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if reuse is True:
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Enter an init scope when creating the initializer.
with ops.init_scope():
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if tf_inspect.isclass(initializer):
initializer = initializer()
if shape is not None and shape.is_fully_defined():
if "partition_info" in tf_inspect.getargspec(initializer).args:
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(),
dtype=dtype,
partition_info=partition_info)
else:
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(), dtype=dtype)
variable_dtype = dtype.base_dtype
elif len(tf_inspect.getargspec(initializer).args) == len(
tf_inspect.getargspec(initializer).defaults or []):
init_val = initializer
variable_dtype = None
else:
raise ValueError("The initializer passed is not valid. It should "
"either be a callable with no arguments (in which case "
"`shape` should not be provided), or an instance of "
"`tf.keras.initializers.*` (in which case `shape` should "
"be fully defined).")
# Create the variable.
if use_resource is None:
# Set the default value if unspecified.
use_resource = _DEFAULT_USE_RESOURCE
v = variables.VariableV1(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
if context.executing_eagerly() and self._store_eager_variables:
if collections:
ops.add_to_collections(collections, v)
else:
ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
if trainable:
ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)
if not context.executing_eagerly() or self._store_eager_variables:
# In eager mode we do not want to keep default references to Variable
# objects as this will prevent their memory from being released.
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
def make_regularizer_op():
with ops.colocate_with(v):
with ops.name_scope(name + "/Regularizer/"):
return regularizer(v)
if regularizer(v) is not None:
lazy_eval_tensor = _LazyEvalTensor(make_regularizer_op)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES,
lazy_eval_tensor)
return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
del shape
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool or
dtype == dtypes.string):
initializer = init_ops.zeros_initializer()
initializing_from_value = False
# NOTE: Do we need to support handling DT_STRING and DT_COMPLEX here?
else:
raise ValueError("An initializer for variable %s of %s is required" %
(name, dtype.base_dtype))
return initializer, initializing_from_value
class _LazyEvalTensor(object):
"""A Tensor-like object that only evaluates its thunk when used."""
def __init__(self, thunk):
"""Initializes a _LazyEvalTensor object.
Args:
thunk: A callable. A thunk which computes the value of the tensor.
"""
self._thunk = thunk
self._master_tensor = thunk()
def _as_tensor(self, dtype=None, name=None, as_ref=False):
del name
assert not as_ref
assert dtype in [None, self.dtype]
return self._thunk()
def _make_master_property(name):
@property
def prop(self):
return getattr(self._master_tensor, name) # pylint: disable=protected-access
return prop
_master_property_list = ("device", "dtype", "graph", "name", "op", "shape",
"value_index")
for _name in _master_property_list:
setattr(_LazyEvalTensor, _name, _make_master_property(_name))
def _make_master_method(name):
def method(self, *args, **kwargs):
return getattr(self._master_tensor, name)(*args, **kwargs) # pylint: disable=protected-access
return method
_master_method_list = ("get_shape", "__str__")
for _name in _master_method_list:
setattr(_LazyEvalTensor, _name, _make_master_method(_name))
def _make_op_method(name):
def method(self, *args, **kwargs):
return getattr(self._as_tensor(), name)(*args, **kwargs) # pylint: disable=protected-access
return method
_op_list = ("__abs__", "__add__", "__and__", "__bool__", "__div__", "__eq__",
"__floordiv__", "__ge__", "__getitem__", "__gt__", "__invert__",
"__iter__", "__le__", "__len__", "__lt__", "__matmul__", "__mod__",
"__mul__", "__ne__", "__neg__", "__nonzero__", "__or__", "__pow__",
"__radd__", "__rand__", "__rdiv__", "__rfloordiv__", "__rmatmul__",
"__rmod__", "__rmul__", "__ror__", "__rpow__", "__rsub__",
"__rtruediv__", "__rxor__", "__sub__", "__truediv__", "__xor__",
"eval", "numpy")
for _name in _op_list:
setattr(_LazyEvalTensor, _name, _make_op_method(_name))
ops.register_tensor_conversion_function(
_LazyEvalTensor,
lambda val, dtype, name, as_ref: val._as_tensor(dtype, name, as_ref) # pylint: disable=protected-access
)
session.register_session_run_conversion_functions(
_LazyEvalTensor,
lambda fetch: ([fetch._master_tensor], lambda fetched_vals: fetched_vals[0]) # pylint: disable=protected-access
)
ops.register_dense_tensor_like_type(_LazyEvalTensor)
# To stop regularization, use this regularizer
@tf_export(v1=["no_regularizer"])
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
return None
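# Example (a sketch): pass `no_regularizer` explicitly to opt a single
# variable out of a scope-level default regularizer. Names below are
# illustrative only.
#
#   with tf.compat.v1.variable_scope("s", regularizer=tf.nn.l2_loss):
#     w = tf.compat.v1.get_variable("w", [4])  # Regularized by l2_loss.
#     b = tf.compat.v1.get_variable(
#         "b", [4], regularizer=no_regularizer)  # No regularization loss added.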
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export(v1=["VariableScope"])
class VariableScope(object):
"""Variable scope object to carry defaults to provide to `get_variable`.
Many of the arguments we need for `get_variable` in a variable store are most
easily handled with a context. This object is used for the defaults.
Attributes:
name: name of the current scope, used as prefix in get_variable.
initializer: default initializer passed to get_variable.
regularizer: default regularizer passed to get_variable.
reuse: Boolean, None, or tf.compat.v1.AUTO_REUSE, setting the reuse in
get_variable. When eager execution is enabled this argument is always
forced to be False.
caching_device: string, callable, or None: the caching device passed to
get_variable.
partitioner: callable or `None`: the partitioner passed to `get_variable`.
custom_getter: default custom getter passed to get_variable.
name_scope: The name passed to `tf.name_scope`.
dtype: default type passed to get_variable (defaults to DT_FLOAT).
use_resource: if False, create a normal Variable; if True create an
experimental ResourceVariable with well-defined semantics. Defaults to
False (will later change to True). When eager execution is enabled this
argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
"""
def __init__(self,
reuse,
name="",
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
name_scope="",
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a new VariableScope with the given properties."""
self._name = name
self._initializer = initializer
self._regularizer = regularizer
self._reuse = reuse
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._name_scope = name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if context.executing_eagerly():
if self._caching_device is not None:
raise NotImplementedError("Caching devices is not yet supported "
"when eager execution is enabled.")
self._reuse = AUTO_REUSE
self._use_resource = True
@property
def name(self):
return self._name
@property
def original_name_scope(self):
return self._name_scope
@property
def reuse(self):
return self._reuse
@property
def initializer(self):
return self._initializer
@property
def dtype(self):
return self._dtype
@property
def use_resource(self):
return self._use_resource
@property
def regularizer(self):
return self._regularizer
@property
def caching_device(self):
return self._caching_device
@property
def partitioner(self):
return self._partitioner
@property
def custom_getter(self):
return self._custom_getter
@property
def constraint(self):
return self._constraint
def reuse_variables(self):
"""Reuse variables in this scope."""
self._reuse = True
def set_initializer(self, initializer):
"""Set initializer for this scope."""
self._initializer = initializer
def set_dtype(self, dtype):
"""Set data type for this scope."""
self._dtype = dtype
def set_use_resource(self, use_resource):
"""Sets whether to use ResourceVariables for this scope."""
if context.executing_eagerly() and not use_resource:
raise ValueError("When eager execution is enabled, "
"use_resource cannot be set to false.")
self._use_resource = use_resource
def set_regularizer(self, regularizer):
"""Set regularizer for this scope."""
self._regularizer = regularizer
def set_caching_device(self, caching_device):
"""Set caching_device for this scope."""
if context.executing_eagerly():
raise NotImplementedError("Caching devices are not yet supported "
"when eager execution is enabled.")
self._caching_device = caching_device
def set_partitioner(self, partitioner):
"""Set partitioner for this scope."""
self._partitioner = partitioner
def set_custom_getter(self, custom_getter):
"""Set custom getter for this scope."""
self._custom_getter = custom_getter
def get_collection(self, name):
"""Get this scope's variables."""
scope = self._name + "/" if self._name else ""
return ops.get_collection(name, scope)
def trainable_variables(self):
"""Get this scope's trainable variables."""
return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def global_variables(self):
"""Get this scope's global variables."""
return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def local_variables(self):
"""Get this scope's local variables."""
return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def get_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if custom_getter is None:
custom_getter = self._custom_getter
if context.executing_eagerly():
reuse = False
use_resource = True
else:
if reuse is None:
reuse = self._reuse
if use_resource is None:
use_resource = self._use_resource
full_name = self.name + "/" + name if self.name else name
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# Check that `initializer` dtype and `dtype` are consistent before
# replacing them with defaults.
if (dtype is not None and initializer is not None and
not callable(initializer)):
init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
if init_dtype != dtype:
raise ValueError("Initializer type '%s' and explicit dtype '%s' "
"don't match." % (init_dtype, dtype))
if initializer is None:
initializer = self._initializer
if constraint is None:
constraint = self._constraint
if dtype is None:
dtype = self._dtype
return var_store.get_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if initializer is None:
initializer = self._initializer
if regularizer is None:
regularizer = self._regularizer
if constraint is None:
constraint = self._constraint
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if dtype is None:
dtype = self._dtype
if use_resource is None:
use_resource = self._use_resource
if self._custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider using get_variable with a non-empty "
"partitioner parameter instead." % self._custom_getter)
if partitioner is None:
raise ValueError("No partitioner was specified")
# This allows the variable scope name to be used as the variable name if
# this function is invoked with an empty name arg, for backward
# compatibility with create_partitioned_variables().
full_name_list = []
if self.name:
full_name_list.append(self.name)
if name:
full_name_list.append(name)
full_name = "/".join(full_name_list)
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# pylint: disable=protected-access
return var_store._get_partitioned_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=self.reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: enable=protected-access
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in list(self.variable_scopes_count.keys()):
if scope_name is None or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
"""Returns the variable scope store for current thread."""
scope_store = ops.get_collection(_VARSCOPESTORE_KEY)
if not scope_store:
scope_store = _VariableScopeStore()
ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
else:
scope_store = scope_store[0]
return scope_store
@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
"""Returns the current variable scope."""
return get_variable_scope_store().current_scope
def _get_default_variable_store():
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
@tf_contextlib.contextmanager
def with_variable_store(store):
store_collection = ops.get_collection_ref(_VARSTORE_KEY)
old = list(store_collection)
store_collection[:] = [store]
try:
yield
finally:
store_collection[:] = old
class EagerVariableStore(object):
"""Wrapper allowing functional layers to be used with eager execution.
When eager execution is enabled Variables get deleted when they go out of
scope, and are not stored in global collections by default. A lot of code
(mostly the functional layers in tf.layers) assumes that variables are kept in
a global list.
EagerVariableStore can be used in conjunction with this code to make it
eager-friendly. For example, to create a dense layer, use:
```
container = tfe.EagerVariableStore()
for input in dataset_iterator:
with container.as_default():
x = tf.compat.v1.layers.dense(input, name="l1")
print(container.variables) # Should print the variables used in the layer.
```
"""
def __init__(self, store=None):
if store is not None:
if not store._store_eager_variables: # pylint: disable=protected-access
raise ValueError("Cannot construct EagerVariableStore from a "
"VariableStore object that does not hold eager "
"variables.")
self._store = store
else:
self._store = _VariableStore()
self._store._store_eager_variables = True # pylint: disable=protected-access
def as_default(self):
return with_variable_store(self._store)
def variables(self):
return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access
def trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def non_trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if not x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def copy(self):
"""Copy this variable store and all of its contents.
Variables contained in this store will be copied over to the new variable
store, meaning that they can be modified without affecting the variables in
this store.
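For example (a minimal sketch, mirroring the class-level example above,
where `tfe` aliases the eager module):
```python
container = tfe.EagerVariableStore()
with container.as_default():
  tf.compat.v1.layers.dense(tf.ones([1, 2]), units=3, name="l1")
snapshot = container.copy()
# Variables in `snapshot` can be assigned to without affecting the
# variables still held by `container`.
```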
Returns:
A new EagerVariableStore instance containing copied variables.
"""
# pylint: disable=protected-access
new_store = EagerVariableStore()
for key, var in iteritems(self._store._vars):
# Strip device out of variable name.
try:
index = var.name.index(":")
except ValueError:
stripped_var_name = var.name
else:
stripped_var_name = var.name[:index]
# Create new variable with same value, name, and "trainable" flag.
new_var = resource_variable_ops.ResourceVariable(
var.read_value(), name=stripped_var_name, trainable=var.trainable)
new_store._store._vars[key] = new_var
return new_store
# pylint: enable=protected-access
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
return get_variable_scope().get_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
get_variable_or_local_docstring = ("""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
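For example, to create a sharded variable, pass a partitioner (a minimal
sketch using `tf.fixed_size_partitioner`; the variable name is illustrative):
```python
w = tf.get_variable(
    "w", shape=[10, 20],
    partitioner=tf.fixed_size_partitioner(num_shards=2, axis=0))
# `w` is a `PartitionedVariable` backed by shards "w/part_0" and "w/part_1";
# reading it as a `Tensor` concatenates the shards along the partition axis.
```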
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created. Can either be
an initializer object or a Tensor. If it's a Tensor, its shape must be known
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
`tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known. For this to be used the initializer must be a Tensor and
not an initializer object.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.", "",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export(v1=["get_local_variable"])
def get_local_variable( # pylint: disable=missing-docstring
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=False, # pylint: disable=unused-argument
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
if collections:
collections += [ops.GraphKeys.LOCAL_VARIABLES]
else:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
return get_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=False,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation,
custom_getter=custom_getter,
constraint=constraint)
get_local_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing *local* variable or creates a new one.",
"Behavior is the same as in `get_variable`, except that variables are\n"
"added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
"`False`.\n", "", "GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
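For example, a partitioner obeying this contract that always splits a
variable into 3 shards along dimension 1 might look like (a minimal sketch;
the function name is illustrative):
```python
def example_partitioner(shape, dtype):
  del dtype  # This sketch decides the partitioning from the shape alone.
  partitions = [1] * shape.ndims
  partitions[1] = 3  # Split into 3 shards along dimension 1.
  return partitions
```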
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to. Defaults
to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache on the
device where the Ops using the Variable reside, to deduplicate copying
through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a value
of unknown shape. If True, the default, the shape of initial_value must be
known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead which has well-defined semantics.
Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
A tuple `(shards, partitions)` where `shards` is the list of `Variable`
shards and `partitions` is the output of the partitioner on the input
shape.
Raises:
ValueError: when creating a new variable and shape is not declared,
or when violating reuse during variable creation. Reuse is set inside
`variable_scope`.
"""
# pylint: disable=protected-access
scope = get_variable_scope()
if scope.custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider using get_variable with a non-empty "
"partitioner parameter instead." % scope.custom_getter)
return scope._get_partitioned_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: enable=protected-access
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object): # pylint: disable=invalid-name
"""A context for the variable_scope, see `variable_scope` for docs."""
def __init__(self,
name_or_scope,
reuse=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
old_name_scope=None,
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True` or None, or tf.compat.v1.AUTO_REUSE; if `None`, we inherit
the parent scope's reuse flag.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
old_name_scope: the original name scope when re-entering a variable scope.
dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
use_resource: If False, variables in this scope will be regular Variables.
If True, experimental ResourceVariables will be creates instead, with
well-defined semantics. Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
"""
self._name_or_scope = name_or_scope
self._reuse = reuse
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._old_name_scope = old_name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
self._var_store = _get_default_variable_store()
self._var_scope_store = get_variable_scope_store()
self._last_variable_scope_object = None
if isinstance(self._name_or_scope, VariableScope):
self._new_name = self._name_or_scope.name
name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access
# Handler for the case when we jump to a shared scope. We create a new
# VariableScope (self._var_scope_object) that contains a copy of the
# provided shared scope, possibly with changed reuse and initializer, if
# the user requested this.
variable_scope_object = VariableScope(
self._name_or_scope.reuse if not self._reuse else self._reuse,
name=self._new_name,
initializer=self._name_or_scope.initializer,
regularizer=self._name_or_scope.regularizer,
caching_device=self._name_or_scope.caching_device,
partitioner=self._name_or_scope.partitioner,
dtype=self._name_or_scope.dtype,
custom_getter=self._name_or_scope.custom_getter,
name_scope=name_scope,
use_resource=self._name_or_scope.use_resource,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._name_or_scope.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._cached_variable_scope_object = variable_scope_object
def __enter__(self):
"""Begins the scope block.
Returns:
A VariableScope.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
self._old = self._var_scope_store.current_scope
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.open_variable_scope(self._new_name)
self._old_subscopes = copy.copy(
self._var_scope_store.variable_scopes_count)
variable_scope_object = self._cached_variable_scope_object
else:
# Handler for the case when we just prolong current variable scope.
# VariableScope with name extended by the provided one, and inherited
# reuse and initializer (except if the user provided values to set).
self._new_name = (
self._old.name + "/" +
self._name_or_scope if self._old.name else self._name_or_scope)
self._reuse = (self._reuse or
self._old.reuse) # Re-using is inherited by sub-scopes.
if self._old_name_scope is None:
name_scope = self._name_or_scope
else:
name_scope = self._old_name_scope
variable_scope_object = VariableScope(
self._reuse,
name=self._new_name,
initializer=self._old.initializer,
regularizer=self._old.regularizer,
caching_device=self._old.caching_device,
partitioner=self._old.partitioner,
dtype=self._old.dtype,
use_resource=self._old.use_resource,
custom_getter=self._old.custom_getter,
name_scope=name_scope,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._old.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._var_scope_store.open_variable_scope(self._new_name)
self._var_scope_store.current_scope = variable_scope_object
self._last_variable_scope_object = variable_scope_object
return variable_scope_object
def __exit__(self, type_arg, value_arg, traceback_arg):
if (self._var_scope_store.current_scope is
not self._last_variable_scope_object):
raise RuntimeError("Improper nesting of variable_scope.")
# If jumping out from a non-prolonged scope, restore counts.
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.variable_scopes_count = self._old_subscopes
else:
self._var_scope_store.close_variable_subscopes(self._new_name)
self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(functools.partial(old_getter, getter), *args, **kwargs)
return wrapped_custom_getter
def _get_unique_variable_scope(prefix):
"""Get a name with the given prefix unique in the current variable scope."""
var_scope_store = get_variable_scope_store()
current_scope = get_variable_scope()
name = current_scope.name + "/" + prefix if current_scope.name else prefix
if var_scope_store.variable_scope_count(name) == 0:
return prefix
idx = 1
while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0:
idx += 1
return prefix + ("_%d" % idx)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["variable_scope"]) # pylint: disable=invalid-name
class variable_scope(object):
"""A context manager for defining ops that creates variables (layers).
This context manager validates that the (optional) `values` are from the same
graph, ensures that graph is the default graph, and pushes a name scope and a
variable scope.
If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
then `default_name` is used. In that case, if the same name has been
previously used in the same scope, it will be made unique by appending `_N`
to it.
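For example (a minimal sketch of `default_name` uniquification):
```python
with tf.compat.v1.variable_scope(None, default_name="block") as s1:
  pass
with tf.compat.v1.variable_scope(None, default_name="block") as s2:
  pass
assert s1.name == "block"
assert s2.name == "block_1"
```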
Variable scope allows you to create new variables and to share already created
ones while providing checks to not create or share by accident. For details,
see the [Variable Scope How To](https://tensorflow.org/guide/variables), here
we present only a few basic examples.
Simple example of how to create a new variable:
```python
with tf.compat.v1.variable_scope("foo"):
with tf.compat.v1.variable_scope("bar"):
v = tf.compat.v1.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Simple example of how to reenter a premade variable scope safely:
```python
with tf.compat.v1.variable_scope("foo") as vs:
pass
# Re-enter the variable scope.
with tf.compat.v1.variable_scope(vs,
auxiliary_name_scope=False) as vs1:
# Restore the original name_scope.
with tf.name_scope(vs1.original_name_scope):
v = tf.compat.v1.get_variable("v", [1])
assert v.name == "foo/v:0"
c = tf.constant([1], name="c")
assert c.name == "foo/c:0"
```
Basic example of sharing a variable AUTO_REUSE:
```python
def foo():
with tf.compat.v1.variable_scope("foo", reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
Basic example of sharing a variable with reuse=True:
```python
with tf.compat.v1.variable_scope("foo"):
v = tf.compat.v1.get_variable("v", [1])
with tf.compat.v1.variable_scope("foo", reuse=True):
v1 = tf.compat.v1.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.compat.v1.variable_scope("foo") as scope:
v = tf.compat.v1.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.compat.v1.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when getting
an existing variable in a non-reusing scope.
```python
with tf.compat.v1.variable_scope("foo"):
v = tf.compat.v1.get_variable("v", [1])
v1 = tf.compat.v1.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that does not
exist in reuse mode.
```python
with tf.compat.v1.variable_scope("foo", reuse=True):
v = tf.compat.v1.get_variable("v", [1])
# Raises ValueError("... v does not exist ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope, then all
its sub-scopes become reusing as well.
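For example, reuse set on an outer scope carries over to its sub-scopes
(a minimal sketch):
```python
with tf.compat.v1.variable_scope("outer"):
  with tf.compat.v1.variable_scope("inner"):
    v = tf.compat.v1.get_variable("v", [1])   # Creates "outer/inner/v".
with tf.compat.v1.variable_scope("outer", reuse=True):
  with tf.compat.v1.variable_scope("inner"):  # Reuse is inherited here.
    v1 = tf.compat.v1.get_variable("v", [1])  # Gets the existing variable.
assert v1 == v
```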
A note about name scoping: Setting `reuse` does not impact the naming of other
ops such as mult. See related discussion on
[github#6189](https://github.com/tensorflow/tensorflow/issues/6189)
Note that up to and including version 1.0, it was allowed (though explicitly
discouraged) to pass False to the reuse argument, yielding undocumented
behaviour slightly different from None. Starting at 1.1.0 passing None and
False as reuse has exactly the same effect.
A note about using variable scopes in multi-threaded environment: Variable
scopes are thread local, so one thread will not see another thread's current
scope. Also, when using `default_name`, unique scopes names are also generated
only on a per thread basis. If the same name was used within a different
thread, that doesn't prevent a new thread from creating the same scope.
However, the underlying variable store is shared across threads (within the
same graph). As such, if another thread tries to create a new variable with
the same name as a variable created by a previous thread, it will fail unless
reuse is True.
Further, each thread starts with an empty variable scope. So if you wish to
preserve name prefixes from a scope from the main thread, you should capture
the main thread's scope and re-enter it in each thread. For example:
```
main_thread_scope = variable_scope.get_variable_scope()
# Thread's target function:
def thread_target_fn(captured_scope):
with variable_scope.variable_scope(captured_scope):
# .... regular code for this thread
thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
```
"""
def __init__(self,
name_or_scope,
default_name=None,
values=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None,
auxiliary_name_scope=True):
"""Initialize the context manager.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
default_name: The default name to use if the `name_or_scope` argument is
`None`; this name will be uniquified. If `name_or_scope` is provided, it
won't be used, so it is not required and can be None.
values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
reuse: `True`, None, or tf.compat.v1.AUTO_REUSE; if `True`, we go into
reuse mode for this scope as well as all sub-scopes; if
tf.compat.v1.AUTO_REUSE, we create variables if they do not exist, and
return them otherwise; if None, we inherit the parent scope's reuse
flag. When eager execution is enabled, new variables are always created
unless an EagerVariableStore or template is currently active.
dtype: type of variables created in this scope (defaults to the type in
the passed scope, or inherited from parent scope).
use_resource: If False, all variables will be regular Variables. If True,
experimental ResourceVariables with well-defined semantics will be used
instead. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
auxiliary_name_scope: If `True`, we create an auxiliary name scope with
the scope. If `False`, we don't create it. Note that the argument is not
inherited, and it only takes effect once, when the scope is created. You should
only use it for re-entering a premade variable scope.
Returns:
A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope.
TypeError: when the types of some arguments are not appropriate.
"""
self._name_or_scope = name_or_scope
self._default_name = default_name
self._values = values
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._reuse = reuse
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if self._default_name is None and self._name_or_scope is None:
raise TypeError("If default_name is None then name_or_scope is required")
if self._reuse is False:
# We don't allow non-inheriting scopes, False = None here.
self._reuse = None
if not (self._reuse is True
or self._reuse is None
or self._reuse is AUTO_REUSE):
raise ValueError("The reuse parameter must be True or False or None.")
if self._values is None:
self._values = []
self._in_graph_mode = not context.executing_eagerly()
if self._in_graph_mode:
self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access
self._cached_pure_variable_scope = None
self._current_name_scope = None
if not isinstance(auxiliary_name_scope, bool):
raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
"but got {}".format(auxiliary_name_scope))
self._auxiliary_name_scope = auxiliary_name_scope
def __enter__(self):
# If the default graph is building a function, then we should not replace it
# with the cached graph.
if ops.get_default_graph().building_function:
self._building_function = True
else:
self._building_function = False
if self._in_graph_mode and not self._building_function:
self._graph_context_manager = self._graph.as_default()
self._graph_context_manager.__enter__()
if self._cached_pure_variable_scope is not None:
# Fast path for re-entering variable_scopes. We've held on to the pure
# variable scope from a previous successful __enter__, so we avoid some
# overhead by re-using that object.
if self._current_name_scope is not None:
self._current_name_scope.__enter__()
return self._cached_pure_variable_scope.__enter__()
try:
return self._enter_scope_uncached()
except:
if (self._in_graph_mode and not self._building_function and
self._graph_context_manager is not None):
self._graph_context_manager.__exit__(*sys.exc_info())
raise
def _enter_scope_uncached(self):
"""Enters the context manager when there is no cached scope yet.
Returns:
The entered variable scope.
Raises:
TypeError: A wrong type is passed as `scope` at __init__().
ValueError: `reuse` is incorrectly set at __init__().
"""
if self._auxiliary_name_scope:
# Create a new name scope later
current_name_scope = None
else:
# Reenter the current name scope
name_scope = ops.get_name_scope()
if name_scope:
# Hack to reenter
name_scope += "/"
current_name_scope = ops.name_scope(name_scope)
else:
# Root scope
current_name_scope = ops.name_scope(name_scope)
# IMPORTANT: Only assign to self._cached_pure_variable_scope and
# self._current_name_scope after successful __enter__() calls.
if self._name_or_scope is not None:
if not isinstance(self._name_or_scope,
(VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_or_scope must be a string or "
"VariableScope.")
if isinstance(self._name_or_scope, six.string_types):
name_scope = self._name_or_scope
else:
name_scope = self._name_or_scope.name.split("/")[-1]
if name_scope or current_name_scope:
current_name_scope = current_name_scope or ops.name_scope(name_scope)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
if isinstance(self._name_or_scope, six.string_types):
old_name_scope = current_name_scope_name
else:
old_name_scope = self._name_or_scope.original_name_scope
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=old_name_scope,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else:
self._current_name_scope = None
# This can only happen if someone is entering the root variable scope.
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else: # Here name_or_scope is None. Using default name, but made unique.
if self._reuse:
raise ValueError("reuse=True cannot be used without a name_or_scope")
current_name_scope = current_name_scope or ops.name_scope(
self._default_name)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
unique_default_name = _get_unique_variable_scope(self._default_name)
pure_variable_scope = _pure_variable_scope(
unique_default_name,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=current_name_scope_name,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
def __exit__(self, type_arg, value_arg, traceback_arg):
try:
self._cached_pure_variable_scope.__exit__(type_arg, value_arg,
traceback_arg)
finally:
try:
if self._current_name_scope:
self._current_name_scope.__exit__(type_arg, value_arg,
traceback_arg)
finally:
if self._in_graph_mode and not self._building_function:
self._graph_context_manager.__exit__(type_arg, value_arg,
traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
name_or_scope,
default_name=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None):
"""Deprecated: context manager for defining an op that creates variables."""
logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
" use tf.variable_scope(name, default_name, values)")
with variable_scope(
name_or_scope,
default_name=default_name,
values=values,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
reuse=reuse,
dtype=dtype,
use_resource=use_resource,
constraint=constraint) as scope:
yield scope
def _call_partitioner(partitioner, shape, dtype):
"""Call partitioner validating its inputs/output.
Args:
partitioner: a function mapping `Tensor` shape and dtype to a list of
partitions.
    shape: shape of the `Tensor` to partition; must be fully defined and have
      rank at least 1.
dtype: dtype of the elements in the `Tensor`.
Returns:
    A list with elements >= 1, at most one of which is > 1. The index of
    that element corresponds to the partitioning axis.
"""
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable must be "
"fully defined, but instead was %s." % (shape,))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
slicing = partitioner(shape=shape, dtype=dtype)
if not isinstance(slicing, collections_lib.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s" %
slicing)
if len(slicing) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (slicing, shape))
if any(p < 1 for p in slicing):
raise ValueError("Partitioner returned zero partitions for some axes: %s" %
slicing)
if sum(p > 1 for p in slicing) > 1:
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, partitioning: %s" % (shape, slicing))
return slicing
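# Illustrative sketch (not part of the original file): a minimal partitioner
# satisfying the contract checked by _call_partitioner above. It returns one
# entry per dimension, every entry >= 1 and at most one entry > 1, splitting
# only along axis 0. `_example_axis0_partitioner` and `num_shards` are
# hypothetical names, not part of the TensorFlow API.
def _example_axis0_partitioner(num_shards):
  def _partitioner(shape, dtype):
    del dtype  # unused in this sketch
    slicing = [1] * shape.ndims
    # Never request more shards than there are elements along axis 0; the
    # caller guarantees the shape is fully defined.
    slicing[0] = max(1, min(num_shards, shape.as_list()[0]))
    return slicing
  return _partitioner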
# TODO(slebedev): could be inlined, but
# `_VariableStore._get_partitioned_variable` is too complex even
# without this logic.
def _get_slice_dim_and_num_slices(slicing):
"""Get slicing dimension and number of slices from the partitioner output."""
for slice_dim, num_slices in enumerate(slicing):
if num_slices > 1:
break
else:
# Degenerate case: no partitioning applied.
slice_dim = 0
num_slices = 1
return slice_dim, num_slices
def _iter_slices(full_shape, num_slices, slice_dim):
"""Slices a given a shape along the specified dimension."""
num_slices_with_excess = full_shape[slice_dim] % num_slices
offset = [0] * len(full_shape)
min_slice_len = full_shape[slice_dim] // num_slices
for i in xrange(num_slices):
shape = full_shape[:]
shape[slice_dim] = min_slice_len + bool(i < num_slices_with_excess)
yield offset[:], shape
offset[slice_dim] += shape[slice_dim]
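# Illustrative worked example (not part of the original file): for a full
# shape of [10, 4] split into 3 slices along axis 0, _iter_slices yields
#   ([0, 0], [4, 4]), ([4, 0], [3, 4]), ([7, 0], [3, 4])
# -- the first `num_slices_with_excess` slices absorb the remainder.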
def default_variable_creator(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
collections = kwargs.get("collections", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
expected_shape = kwargs.get("expected_shape", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
use_resource = kwargs.get("use_resource", None)
synchronization = kwargs.get("synchronization", None)
aggregation = kwargs.get("aggregation", None)
shape = kwargs.get("shape", None)
if use_resource is None:
use_resource = get_variable_scope().use_resource
if use_resource is None:
use_resource = _DEFAULT_USE_RESOURCE
use_resource = use_resource or context.executing_eagerly()
if use_resource:
distribute_strategy = kwargs.get("distribute_strategy", None)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
import_scope=import_scope,
distribute_strategy=distribute_strategy,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
else:
return variables.RefVariable(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
expected_shape=expected_shape,
import_scope=import_scope,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
def default_variable_creator_v2(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
distribute_strategy = kwargs.get("distribute_strategy", None)
synchronization = kwargs.get("synchronization", None)
aggregation = kwargs.get("aggregation", None)
shape = kwargs.get("shape", None)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
trainable=trainable,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
import_scope=import_scope,
distribute_strategy=distribute_strategy,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
variables.default_variable_creator = default_variable_creator
variables.default_variable_creator_v2 = default_variable_creator_v2
def _make_getter(captured_getter, captured_previous):
"""Gets around capturing loop variables in python being broken."""
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
# TODO(apassos) remove forwarding symbol
variable = variables.VariableV1
@tf_export(v1=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope_v1(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create a variable and not call Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in kwds are:
* initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
* trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
`trainable` defaults to `True`, unless `synchronization` is
set to `ON_READ`, in which case it defaults to `False`.
* collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
* validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
* caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
* name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
* dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
* constraint: A constraint function to be applied to the variable after
updates by some algorithms.
* use_resource: if True, a ResourceVariable is always created.
  * synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
* aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
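# Illustrative sketch (not part of the original file): a composable creator
# matching the signature described in the docstring above. It logs the
# requested variable name and defers to the next creator in the chain;
# `_logging_creator` is a hypothetical name.
def _logging_creator(next_creator, **kwargs):
  logging.info("creating variable %s", kwargs.get("name"))
  return next_creator(**kwargs)
# Hypothetical usage, relying on the `variable` alias defined above:
#   with variable_creator_scope_v1(_logging_creator):
#     v = variable(initial_value=1.0, name="example")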
# Note: only the docstrings differ between this and v1.
@tf_export("variable_creator_scope", v1=[])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create a variable and not call Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in kwds are:
* initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
* trainable: If `True`, the default, GradientTapes automatically watch
uses of this Variable.
* validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
* caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
* name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
  * dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
* constraint: A constraint function to be applied to the variable after
updates by some algorithms.
  * synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
* aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
|
autolauncher.py
|
from __future__ import annotations
import logging
import multiprocessing
from datetime import timedelta, datetime
import concurrent.futures
import sys
import typing
import time
from pony.orm import db_session, select, commit
from Utils import restricted_loads
class CommonLocker():
"""Uses a file lock to signal that something is already running"""
def __init__(self, lockname: str):
self.lockname = lockname
self.lockfile = f"./{self.lockname}.lck"
class AlreadyRunningException(Exception):
pass
if sys.platform == 'win32':
import os
class Locker(CommonLocker):
def __enter__(self):
try:
if os.path.exists(self.lockfile):
os.unlink(self.lockfile)
self.fp = os.open(
self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
except OSError as e:
raise AlreadyRunningException() from e
def __exit__(self, _type, value, tb):
fp = getattr(self, "fp", None)
if fp:
os.close(self.fp)
os.unlink(self.lockfile)
else: # unix
import fcntl
class Locker(CommonLocker):
def __enter__(self):
try:
self.fp = open(self.lockfile, "wb")
fcntl.flock(self.fp.fileno(), fcntl.LOCK_EX)
except OSError as e:
raise AlreadyRunningException() from e
def __exit__(self, _type, value, tb):
fcntl.flock(self.fp.fileno(), fcntl.LOCK_UN)
self.fp.close()
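# Illustrative sketch (not part of the original file): Locker is meant to be
# used as a context manager, as autohost() below does for the "autohost" lock.
# `example_task` is a hypothetical lock name.
#
# try:
#     with Locker("example_task"):
#         ...  # work that must not run twice concurrently
# except AlreadyRunningException:
#     pass  # another process already holds ./example_task.lck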
def launch_room(room: Room, config: dict):
# requires db_session!
if room.last_activity >= datetime.utcnow() - timedelta(seconds=room.timeout):
multiworld = multiworlds.get(room.id, None)
if not multiworld:
multiworld = MultiworldInstance(room, config)
multiworld.start()
def handle_generation_success(seed_id):
logging.info(f"Generation finished for seed {seed_id}")
def handle_generation_failure(result: BaseException):
try: # hacky way to get the full RemoteTraceback
raise result
except Exception as e:
logging.exception(e)
def launch_generator(pool: multiprocessing.pool.Pool, generation: Generation):
options = restricted_loads(generation.options)
logging.info(f"Generating {generation.id} for {len(options)} players")
meta = restricted_loads(generation.meta)
pool.apply_async(gen_game, (options,),
{"race": meta["race"], "sid": generation.id, "owner": generation.owner},
handle_generation_success, handle_generation_failure)
generation.state = STATE_STARTED
def init_db(pony_config: dict):
db.bind(**pony_config)
db.generate_mapping()
def autohost(config: dict):
def keep_running():
try:
with Locker("autohost"):
with multiprocessing.Pool(config["GENERATORS"], initializer=init_db,
initargs=(config["PONY"],)) as generator_pool:
with db_session:
to_start = select(generation for generation in Generation if generation.state == STATE_STARTED)
if to_start:
logging.info("Resuming generation")
for generation in to_start:
sid = Seed.get(id=generation.id)
if sid:
generation.delete()
else:
launch_generator(generator_pool, generation)
commit()
select(generation for generation in Generation if generation.state == STATE_ERROR).delete()
while 1:
time.sleep(0.50)
with db_session:
rooms = select(
room for room in Room if
room.last_activity >= datetime.utcnow() - timedelta(days=3))
for room in rooms:
launch_room(room, config)
to_start = select(
generation for generation in Generation if generation.state == STATE_QUEUED)
for generation in to_start:
launch_generator(generator_pool, generation)
except AlreadyRunningException:
pass
import threading
threading.Thread(target=keep_running).start()
multiworlds = {}
guardians = concurrent.futures.ThreadPoolExecutor(2, thread_name_prefix="Guardian")
class MultiworldInstance():
def __init__(self, room: Room, config: dict):
self.room_id = room.id
self.process: typing.Optional[multiprocessing.Process] = None
multiworlds[self.room_id] = self
self.ponyconfig = config["PONY"]
def start(self):
if self.process and self.process.is_alive():
return False
logging.info(f"Spinning up {self.room_id}")
self.process = multiprocessing.Process(group=None, target=run_server_process,
args=(self.room_id, self.ponyconfig),
name="MultiHost")
self.process.start()
self.guardian = guardians.submit(self._collect)
def stop(self):
if self.process:
self.process.terminate()
self.process = None
def _collect(self):
self.process.join() # wait for process to finish
self.process = None
self.guardian = None
from .models import Room, Generation, STATE_QUEUED, STATE_STARTED, STATE_ERROR, db, Seed
from .customserver import run_server_process
from .generate import gen_game
|
video_ffpyplayer.py
|
'''
FFmpeg based video abstraction
==============================
To use, you need to install ffpyplayer and have a compiled ffmpeg shared
library.
https://github.com/matham/ffpyplayer
The docs there describe how to set this up. But briefly, first you need to
compile ffmpeg using the shared flags while disabling the static flags (you'll
probably have to set the fPIC flag, e.g. CFLAGS=-fPIC). Here's some
instructions: https://trac.ffmpeg.org/wiki/CompilationGuide. For Windows, you
can download compiled GPL binaries from http://ffmpeg.zeranoe.com/builds/.
Similarly, you should download SDL.
Now, you should have an ffmpeg and an sdl directory. In each, you should have an include,
bin, and lib directory, where e.g. for Windows, lib contains the .dll.a files,
while bin contains the actual dlls. The include directory holds the headers.
The bin directory is only needed if the shared libraries are not already on
the path. In the environment define FFMPEG_ROOT and SDL_ROOT, each pointing to
the ffmpeg and SDL directories, respectively. (If you're using SDL2,
the include directory will contain a directory called SDL2, which then holds
the headers).
Once defined, download the ffpyplayer git and run
python setup.py build_ext --inplace
Finally, before running you need to ensure that ffpyplayer is in python's path.
..Note::
When kivy exits by closing the window while the video is playing,
    it appears that the __del__ method of VideoFFPy
is not called. Because of this the VideoFFPy object is not
properly deleted when kivy exits. The consequence is that because
MediaPlayer creates internal threads which do not have their daemon
    flag set, when the main thread exits it'll hang and wait for the other
MediaPlayer threads to exit. But since __del__ is not called to delete the
MediaPlayer object, those threads will remain alive hanging kivy. What this
means is that you have to be sure to delete the MediaPlayer object before
kivy exits by setting it to None.
'''
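# Illustrative note (not part of the original file): per the caveat above, the
# MediaPlayer must be released before the app exits so its non-daemon threads
# can terminate. A hypothetical shutdown handler might look like:
#
#   def on_app_stop(video):  # `video` is assumed to be a VideoFFPy instance
#       video.unload()       # unload() sets self._ffplayer to None, letting
#                            # the MediaPlayer threads be cleaned up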
__all__ = ('VideoFFPy', )
try:
import ffpyplayer
from ffpyplayer.player import MediaPlayer
from ffpyplayer.tools import set_log_callback, loglevels, get_log_callback
except:
raise
from threading import Thread
from kivy.clock import Clock, mainthread
from kivy.logger import Logger
from kivy.core.video import VideoBase
from kivy.graphics import Rectangle, BindTexture
from kivy.graphics.texture import Texture
from kivy.graphics.fbo import Fbo
from kivy.weakmethod import WeakMethod
import time
Logger.info('VideoFFPy: Using ffpyplayer {}'.format(ffpyplayer.version))
logger_func = {'quiet': Logger.critical, 'panic': Logger.critical,
'fatal': Logger.critical, 'error': Logger.error,
'warning': Logger.warning, 'info': Logger.info,
'verbose': Logger.debug, 'debug': Logger.debug}
def _log_callback(message, level):
message = message.strip()
if message:
logger_func[level]('ffpyplayer: {}'.format(message))
class VideoFFPy(VideoBase):
YUV_RGB_FS = """
$HEADER$
uniform sampler2D tex_y;
uniform sampler2D tex_u;
uniform sampler2D tex_v;
void main(void) {
float y = texture2D(tex_y, tex_coord0).r;
float u = texture2D(tex_u, tex_coord0).r - 0.5;
float v = texture2D(tex_v, tex_coord0).r - 0.5;
float r = y + 1.402 * v;
float g = y - 0.344 * u - 0.714 * v;
float b = y + 1.772 * u;
gl_FragColor = vec4(r, g, b, 1.0);
}
"""
def __init__(self, **kwargs):
self._ffplayer = None
self._thread = None
self._next_frame = None
self._ffplayer_need_quit = False
self._log_callback_set = False
self._callback_ref = WeakMethod(self._player_callback)
self._trigger = Clock.create_trigger(self._redraw)
if not get_log_callback():
set_log_callback(_log_callback)
self._log_callback_set = True
super(VideoFFPy, self).__init__(**kwargs)
def __del__(self):
self.unload()
if self._log_callback_set:
set_log_callback(None)
def _player_callback(self, selector, value):
if self._ffplayer is None:
return
if selector == 'quit':
def close(*args):
self.unload()
Clock.schedule_once(close, 0)
def _get_position(self):
if self._ffplayer is not None:
return self._ffplayer.get_pts()
return 0
def _set_position(self, pos):
self.seek(pos)
def _get_volume(self):
if self._ffplayer is not None:
self._volume = self._ffplayer.get_volume()
return self._volume
def _set_volume(self, volume):
self._volume = volume
if self._ffplayer is not None:
self._ffplayer.set_volume(volume)
def _get_duration(self):
if self._ffplayer is None:
return 0
return self._ffplayer.get_metadata()['duration']
@mainthread
def _do_eos(self):
if self.eos == 'pause':
self.pause()
elif self.eos == 'stop':
self.stop()
elif self.eos == 'loop':
self.position = 0
self.dispatch('on_eos')
@mainthread
def _change_state(self, state):
self._state = state
def _redraw(self, *args):
if not self._ffplayer:
return
next_frame = self._next_frame
if not next_frame:
return
img, pts = next_frame
if img.get_size() != self._size or self._texture is None:
self._size = w, h = img.get_size()
if self._out_fmt == 'yuv420p':
w2 = int(w / 2)
h2 = int(h / 2)
self._tex_y = Texture.create(
size=(w, h), colorfmt='luminance')
self._tex_u = Texture.create(
size=(w2, h2), colorfmt='luminance')
self._tex_v = Texture.create(
size=(w2, h2), colorfmt='luminance')
self._fbo = fbo = Fbo(size=self._size)
with fbo:
BindTexture(texture=self._tex_u, index=1)
BindTexture(texture=self._tex_v, index=2)
Rectangle(size=fbo.size, texture=self._tex_y)
fbo.shader.fs = VideoFFPy.YUV_RGB_FS
fbo['tex_y'] = 0
fbo['tex_u'] = 1
fbo['tex_v'] = 2
self._texture = fbo.texture
else:
self._texture = Texture.create(size=self._size, colorfmt='rgba')
# XXX FIXME
#self.texture.add_reload_observer(self.reload_buffer)
self._texture.flip_vertical()
self.dispatch('on_load')
if self._texture:
if self._out_fmt == 'yuv420p':
dy, du, dv, _ = img.to_memoryview()
self._tex_y.blit_buffer(dy, colorfmt='luminance')
self._tex_u.blit_buffer(du, colorfmt='luminance')
self._tex_v.blit_buffer(dv, colorfmt='luminance')
else:
self._texture.blit_buffer(
img.to_memoryview()[0], colorfmt='rgba')
self._fbo.ask_update()
self._fbo.draw()
self.dispatch('on_frame')
def _next_frame_run(self):
ffplayer = self._ffplayer
sleep = time.sleep
trigger = self._trigger
did_dispatch_eof = False
# fast path, if the source video is yuv420p, we'll use a glsl shader for
# buffer conversion to rgba
while not self._ffplayer_need_quit:
src_pix_fmt = ffplayer.get_metadata().get('src_pix_fmt')
if not src_pix_fmt:
sleep(0.005)
continue
if src_pix_fmt == 'yuv420p':
self._out_fmt = 'yuv420p'
ffplayer.set_output_pix_fmt(self._out_fmt)
self._ffplayer.toggle_pause()
break
if self._ffplayer_need_quit:
return
# wait until loaded or failed, shouldn't take long, but just to make
# sure metadata is available.
s = time.clock()
while not self._ffplayer_need_quit:
if ffplayer.get_metadata()['src_vid_size'] != (0, 0):
break
            # XXX will it fail later then?
if time.clock() - s > 10.:
break
sleep(0.005)
if self._ffplayer_need_quit:
return
        # we got all the information, now get the frames :)
self._change_state('playing')
while not self._ffplayer_need_quit:
t1 = time.time()
frame, val = ffplayer.get_frame()
t2 = time.time()
if val == 'eof':
sleep(0.2)
if not did_dispatch_eof:
self._do_eos()
did_dispatch_eof = True
elif val == 'paused':
did_dispatch_eof = False
sleep(0.2)
else:
did_dispatch_eof = False
if frame:
self._next_frame = frame
trigger()
else:
val = val if val else (1 / 30.)
sleep(val)
def seek(self, percent):
if self._ffplayer is None:
return
self._ffplayer.seek(percent * self._ffplayer.get_metadata()
['duration'], relative=False)
self._next_frame = None
def stop(self):
self.unload()
def pause(self):
if self._ffplayer and self._state != 'paused':
self._ffplayer.toggle_pause()
self._state = 'paused'
def play(self):
if self._ffplayer and self._state == 'paused':
self._ffplayer.toggle_pause()
self._state = 'playing'
return
self.load()
self._out_fmt = 'rgba'
ff_opts = {
'paused': True,
'out_fmt': self._out_fmt
}
self._ffplayer = MediaPlayer(
self._filename, callback=self._callback_ref,
thread_lib='SDL',
loglevel='info', ff_opts=ff_opts)
self._thread = Thread(target=self._next_frame_run, name='Next frame')
self._thread.start()
def load(self):
self.unload()
def unload(self):
Clock.unschedule(self._redraw)
self._ffplayer_need_quit = True
if self._thread:
self._thread.join()
self._thread = None
if self._ffplayer:
self._ffplayer = None
self._next_frame = None
self._size = (0, 0)
self._state = ''
self._ffplayer_need_quit = False
|
listen.py
|
import numpy as np
from config.config import *
from lib.machinelearning import feature_engineering, feature_engineering_raw, get_label_for_directory, get_highest_intensity_of_wav_file, get_recording_power
import pyaudio
import wave
import time
import scipy
import scipy.io.wavfile
import hashlib
import os
import operator
import audioop
import math
import time
import csv
from scipy.fftpack import fft
from scipy.fftpack import fftfreq
import msvcrt
from queue import *
import threading
import traceback
import sys
def break_loop_controls(audioQueue=None):
ESCAPEKEY = b'\x1b'
SPACEBAR = b' '
if( msvcrt.kbhit() ):
character = msvcrt.getch()
if( character == SPACEBAR ):
print( "Listening paused " )
# Pause the recording by looping until we get a new keypress
while( True ):
## If the audio queue exists - make sure to clear it continuously
if( audioQueue != None ):
audioQueue.queue.clear()
if( msvcrt.kbhit() ):
character = msvcrt.getch()
if( character == SPACEBAR ):
print( "Listening resumed! " )
return True
elif( character == ESCAPEKEY ):
print( "Listening stopped " )
return False
elif( character == ESCAPEKEY ):
print( "Listening stopped " )
return False
return True
def classify_audioframes( audioQueue, audio_frames, classifier, high_speed ):
if( not audioQueue.empty() ):
audio_frames.append( audioQueue.get() )
# In case we are dealing with frames not being met and a buffer being built up,
# Start skipping every other audio frame to maintain being up to date,
# Trading being up to date over being 100% correct in sequence
if( audioQueue.qsize() > 1 ):
print( "SKIP FRAME", audioQueue.qsize() )
audioQueue.get()
if( len( audio_frames ) >= 2 ):
audio_frames = audio_frames[-2:]
highestintensity = np.amax( audioop.maxpp( audio_frames[1], 4 ) / 32767 )
wavData = b''.join(audio_frames)
# SKIP FEATURE ENGINEERING COMPLETELY WHEN DEALING WITH SILENCE
if( high_speed == True and highestintensity < SILENCE_INTENSITY_THRESHOLD ):
probabilityDict, predicted, frequency = create_empty_probability_dict( classifier, {}, 0, highestintensity, 0 )
else:
                fftData = np.frombuffer( wavData, dtype=np.int16 )
power = get_recording_power( fftData, RECORD_SECONDS )
probabilityDict, predicted, frequency = predict_raw_data( wavData, classifier, highestintensity, power )
return probabilityDict, predicted, audio_frames, highestintensity, frequency, wavData
return False, False, audio_frames, False, False, False
def action_consumer( stream, classifier, dataDicts, persist_replay, replay_file, mode_switcher=False ):
actions = []
global classifierQueue
starttime = time.time()
try:
if( persist_replay ):
with open(replay_file, 'a', newline='') as csvfile:
headers = ['time', 'winner', 'intensity', 'frequency', 'power', 'actions', 'buffer']
headers.extend( classifier.classes_ )
writer = csv.DictWriter(csvfile, fieldnames=headers, delimiter=',')
writer.writeheader()
while( stream.is_active() ):
if( not classifierQueue.empty() ):
current_time = time.time()
seconds_playing = time.time() - starttime
probabilityDict = classifierQueue.get()
dataDicts.append( probabilityDict )
if( len(dataDicts) > PREDICTION_LENGTH ):
dataDicts.pop(0)
if( mode_switcher ):
actions = mode_switcher.getMode().handle_input( dataDicts )
if( isinstance( actions, list ) == False ):
actions = []
replay_row = { 'time': int(seconds_playing * 1000) / 1000, 'actions': ':'.join(actions), 'buffer': classifierQueue.qsize()}
for label, labelDict in probabilityDict.items():
replay_row[ label ] = labelDict['percent']
if( labelDict['winner'] ):
replay_row['winner'] = label
replay_row['intensity'] = int(labelDict['intensity'])
replay_row['power'] = int(labelDict['power'])
replay_row['frequency'] = labelDict['frequency']
writer.writerow( replay_row )
csvfile.flush()
else:
while( stream.is_active() ):
if( not classifierQueue.empty() ):
dataDicts.append( classifierQueue.get() )
if( len(dataDicts) > PREDICTION_LENGTH ):
dataDicts.pop(0)
if( mode_switcher ):
actions = mode_switcher.getMode().handle_input( dataDicts )
if( isinstance( actions, list ) == False ):
actions = []
except Exception as e:
print( "----------- ERROR DURING CONSUMING ACTIONS -------------- " )
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb)
stream.stop_stream()
def classification_consumer( audio, stream, classifier, persist_files, high_speed ):
audio_frames = []
dataDicts = []
for i in range( 0, PREDICTION_LENGTH ):
dataDict = {}
for directoryname in classifier.classes_:
dataDict[ directoryname ] = {'percent': 0, 'intensity': 0, 'frequency': 0, 'winner': False}
dataDicts.append( dataDict )
starttime = time.time()
global audioQueue
global classifierQueue
try:
while( stream.is_active() ):
probabilityDict, predicted, audio_frames, highestintensity, frequency, wavData = classify_audioframes( audioQueue, audio_frames, classifier, high_speed )
# Skip if a prediction could not be made
if( probabilityDict == False ):
continue
seconds_playing = time.time() - starttime
winner = classifier.classes_[ predicted ]
prediction_time = time.time() - starttime - seconds_playing
#long_comment = "Time: %0.2f - Prediction in: %0.2f - Winner: %s - Percentage: %0d - Frequency %0d " % (seconds_playing, prediction_time, winner, probabilityDict[winner]['percent'], probabilityDict[winner]['frequency'])
short_comment = "T %0.2f - [%0d%s %s] F:%0d I:%0d P:%0d" % (seconds_playing, probabilityDict[winner]['percent'], '%', winner, frequency, probabilityDict[winner]['intensity'], probabilityDict[winner]['power'])
if( winner != "silence" ):
print( short_comment )
classifierQueue.put( probabilityDict )
if( persist_files ):
audioFile = wave.open(REPLAYS_AUDIO_FOLDER + "/%0.3f.wav" % (seconds_playing), 'wb')
audioFile.setnchannels(CHANNELS)
audioFile.setsampwidth(audio.get_sample_size(FORMAT))
audioFile.setframerate(RATE)
audioFile.writeframes(wavData)
audioFile.close()
except Exception as e:
print( "----------- ERROR DURING AUDIO CLASSIFICATION -------------- " )
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb)
stream.stop_stream()
def nonblocking_record( in_data, frame_count, time_info, status ):
global audioQueue
audioQueue.put( in_data )
return in_data, pyaudio.paContinue
def start_nonblocking_listen_loop( classifier, mode_switcher = False, persist_replay = False, persist_files = False, amount_of_seconds=-1, high_speed=False ):
global audioQueue
audioQueue = Queue(maxsize=0)
global classifierQueue
classifierQueue = Queue(maxsize=0)
# Get a minimum of these elements of data dictionaries
dataDicts = []
audio_frames = []
for i in range( 0, PREDICTION_LENGTH ):
dataDict = {}
for directoryname in classifier.classes_:
dataDict[ directoryname ] = {'percent': 0, 'intensity': 0, 'frequency': 0, 'winner': False}
dataDicts.append( dataDict )
continue_loop = True
starttime = int(time.time())
replay_file = REPLAYS_FOLDER + "/replay_" + str(starttime) + ".csv"
infinite_duration = amount_of_seconds == -1
if( infinite_duration ):
print( "Listening..." )
else:
print ( "Listening for " + str( amount_of_seconds ) + " seconds..." )
print ( "" )
audio = pyaudio.PyAudio()
stream = audio.open(format=FORMAT, channels=CHANNELS,
rate=RATE, input=True,
input_device_index=INPUT_DEVICE_INDEX,
frames_per_buffer=round( RATE * RECORD_SECONDS / SLIDING_WINDOW_AMOUNT ),
stream_callback=nonblocking_record)
classificationConsumer = threading.Thread(name='classification_consumer', target=classification_consumer, args=(audio, stream, classifier, persist_files, high_speed) )
classificationConsumer.setDaemon( True )
classificationConsumer.start()
actionConsumer = threading.Thread(name='action_consumer', target=action_consumer, args=(stream, classifier, dataDicts, persist_replay, replay_file, mode_switcher) )
actionConsumer.setDaemon( True )
actionConsumer.start()
stream.start_stream()
while stream.is_active():
currenttime = int(time.time())
if( not infinite_duration and currenttime - starttime > amount_of_seconds or break_loop_controls( audioQueue ) == False ):
stream.stop_stream()
time.sleep(0.1)
# Stop all the streams and different threads
stream.stop_stream()
stream.close()
audio.terminate()
audioQueue.queue.clear()
classifierQueue.queue.clear()
return replay_file
def start_listen_loop( classifier, mode_switcher = False, persist_replay = False, persist_files = False, amount_of_seconds=-1, high_speed=False ):
# Get a minimum of these elements of data dictionaries
dataDicts = []
audio_frames = []
for i in range( 0, PREDICTION_LENGTH ):
dataDict = {}
for directoryname in classifier.classes_:
dataDict[ directoryname ] = {'percent': 0, 'intensity': 0, 'frequency': 0, 'winner': False}
dataDicts.append( dataDict )
continue_loop = True
starttime = int(time.time())
replay_file = REPLAYS_FOLDER + "/replay_" + str(starttime) + ".csv"
infinite_duration = amount_of_seconds == -1
if( infinite_duration ):
print( "Listening..." )
else:
print ( "Listening for " + str( amount_of_seconds ) + " seconds..." )
print ( "" )
audio = pyaudio.PyAudio()
stream = audio.open(format=FORMAT, channels=CHANNELS,
rate=RATE, input=True,
input_device_index=INPUT_DEVICE_INDEX,
frames_per_buffer=CHUNK)
if( persist_replay ):
with open(replay_file, 'a', newline='') as csvfile:
headers = ['time', 'winner', 'intensity', 'frequency', 'actions']
headers.extend( classifier.classes_ )
writer = csv.DictWriter(csvfile, fieldnames=headers, delimiter=',')
writer.writeheader()
starttime = int(time.time())
while( continue_loop ):
seconds_playing = time.time() - starttime
probabilityDict, predicted, audio_frames, intensity, frequency, wavData = listen_loop( audio, stream, classifier, dataDicts, audio_frames, high_speed )
winner = classifier.classes_[ predicted ]
dataDicts.append( probabilityDict )
if( len(dataDicts) > PREDICTION_LENGTH ):
dataDicts.pop(0)
prediction_time = time.time() - starttime - seconds_playing
long_comment = "Time: %0.2f - Prediction in: %0.2f - Winner: %s - Percentage: %0d - Frequency %0d " % (seconds_playing, prediction_time, winner, probabilityDict[winner]['percent'], probabilityDict[winner]['frequency'])
short_comment = "T: %0.2f - %0d%s - %s " % (seconds_playing, probabilityDict[winner]['percent'], '%', winner)
print( short_comment )
if( ( infinite_duration == False and seconds_playing > amount_of_seconds ) or break_loop_controls() == False ):
continue_loop = False
actions = []
if( mode_switcher ):
actions = mode_switcher.getMode().handle_input( dataDicts )
if( isinstance( actions, list ) == False ):
actions = []
replay_row = { 'time': int(seconds_playing * 1000) / 1000, 'winner': winner, 'intensity': int(intensity), 'frequency': frequency, 'actions': ':'.join(actions) }
for label, labelDict in probabilityDict.items():
replay_row[ label ] = labelDict['percent']
writer.writerow( replay_row )
csvfile.flush()
if( persist_files ):
audioFile = wave.open(REPLAYS_AUDIO_FOLDER + "/%0.3f.wav" % (seconds_playing), 'wb')
audioFile.setnchannels(CHANNELS)
audioFile.setsampwidth(audio.get_sample_size(FORMAT))
audioFile.setframerate(RATE)
audioFile.writeframes(wavData)
audioFile.close()
print("Finished listening! ")
stream.close()
else:
starttime = int(time.time())
while( continue_loop ):
probabilityDict, predicted, audio_frames, intensity, frequency, wavData = listen_loop( audio, stream, classifier, dataDicts, audio_frames, high_speed )
dataDicts.append( probabilityDict )
if( len(dataDicts) > PREDICTION_LENGTH ):
dataDicts.pop(0)
            seconds_playing = time.time() - starttime
if( ( infinite_duration == False and seconds_playing > amount_of_seconds ) or break_loop_controls() == False ):
continue_loop = False
if( mode_switcher ):
mode_switcher.getMode().handle_input( dataDicts )
if( persist_files ):
audioFile = wave.open(REPLAYS_AUDIO_FOLDER + "/%0.3f.wav" % (seconds_playing), 'wb')
audioFile.setnchannels(CHANNELS)
audioFile.setsampwidth(audio.get_sample_size(FORMAT))
audioFile.setframerate(RATE)
audioFile.writeframes(wavData)
audioFile.close()
stream.close()
return replay_file
def listen_loop( audio, stream, classifier, dataDicts, audio_frames, high_speed=False ):
audio_frames, intensity = get_stream_wav_segment( stream, [] )
wavData = b''.join(audio_frames)
# SKIP FEATURE ENGINEERING COMPLETELY WHEN DEALING WITH SILENCE
if( high_speed == True and intensity < SILENCE_INTENSITY_THRESHOLD ):
        probabilityDict, predicted, frequency = create_empty_probability_dict( classifier, {}, 0, intensity, 0 )
else:
        fftData = np.frombuffer( wavData, dtype=np.int16 )
power = get_recording_power( fftData, RECORD_SECONDS )
probabilityDict, predicted, frequency = predict_raw_data( wavData, classifier, intensity, power )
return probabilityDict, predicted, audio_frames, intensity, frequency, wavData
def get_stream_wav_segment( stream, frames ):
stream.start_stream()
range_length = int(RATE / CHUNK * RECORD_SECONDS)
remove_half = int( range_length / 2 )
frames = frames[remove_half:]
frame_length = len( frames )
intensity = []
for i in range( frame_length, range_length):
data = stream.read(CHUNK)
peak = audioop.maxpp( data, 4 ) / 32767
intensity.append( peak )
frames.append(data)
highestintensity = np.amax( intensity )
stream.stop_stream()
return frames, highestintensity
def predict_wav_files( classifier, wav_files ):
dataDicts = []
audio_frames = []
print ( "Analyzing " + str( len( wav_files) ) + " audio files..." )
print ( "" )
for i in range( 0, PREDICTION_LENGTH ):
dataDict = {}
for directoryname in classifier.classes_:
dataDict[ directoryname ] = {'percent': 0, 'intensity': 0}
dataDicts.append( dataDict )
probabilities = []
for index, wav_file in enumerate( wav_files ):
highestintensity = get_highest_intensity_of_wav_file( wav_file )
probabilityDict, predicted, frequency = predict_wav_file( wav_file, classifier, highestintensity )
winner = classifier.classes_[predicted]
#print( "Analyzing file " + str( index + 1 ) + " - Winner: %s - Percentage: %0d - Frequency: %0d " % (winner, probabilityDict[winner]['percent'], probabilityDict[winner]['frequency']) , end="\r")
probabilities.append( probabilityDict )
print( " ", end="\r" )
return probabilities
def predict_raw_data( wavData, classifier, intensity, power ):
# FEATURE ENGINEERING
first_channel_data = np.frombuffer( wavData, dtype=np.int16 )[::2]
data_row, frequency = feature_engineering_raw( first_channel_data, RATE, intensity )
data = [ data_row ]
return create_probability_dict( classifier, data, frequency, intensity, power )
def predict_wav_file( wav_file, classifier, intensity ):
# FEATURE ENGINEERING
data_row, frequency = feature_engineering( wav_file )
data = [ data_row ]
if( intensity < SILENCE_INTENSITY_THRESHOLD ):
return create_empty_probability_dict( classifier, data, frequency, intensity, 0 )
else:
return create_probability_dict( classifier, data, frequency, intensity, 0 )
def create_empty_probability_dict( classifier, data, frequency, intensity, power ):
probabilityDict = {}
index = 0
predicted = -1
for label in classifier.classes_:
winner = False
percent = 0
if( label == 'silence' ):
predicted = index
percent = 100
winner = True
probabilityDict[ label ] = { 'percent': percent, 'intensity': int(intensity), 'winner': winner, 'frequency': frequency, 'power': power }
index += 1
return probabilityDict, predicted, frequency
def create_probability_dict( classifier, data, frequency, intensity, power ):
# Predict the outcome of the audio file
probabilities = classifier.predict_proba( data ) * 100
probabilities = probabilities.astype(int)
# Get the predicted winner
predicted = np.argmax( probabilities[0] )
if( isinstance(predicted, list) ):
predicted = predicted[0]
probabilityDict = {}
for index, percent in enumerate( probabilities[0] ):
label = classifier.classes_[ index ]
probabilityDict[ label ] = { 'percent': percent, 'intensity': int(intensity), 'winner': index == predicted, 'frequency': frequency, 'power': power }
return probabilityDict, predicted, frequency
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
|
webstreaming.py
|
"""
this is a web interface
edited by hichens
"""
import face_recognition
from flask import Response
from flask import Flask, request, redirect, url_for
from flask import render_template
import datetime
import cv2
import os
import sqlite3 as sq
import numpy as np
import time
import sys; sys.path.append("../")
from FaceRecognition import functions as fc
import OCR.baiduOcr as ocr
import threading
from werkzeug.utils import secure_filename
lock = threading.Lock()
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = datetime.timedelta(seconds=1) # file refresh time
app.config['MAX_CONTENT_LENGTH'] = 160 * 1024 * 1024 # maximum file <= 160MB
baseDir = os.path.abspath(os.path.dirname(__file__))
dataBaseDir = os.path.join(baseDir, 'DataBase')
"""global paramters"""
outputFrame = None
name = 'Unknown'
capFlag = False # control the camera
ALLOWED_EXTENSIONS = set(['jpg', 'png'])
total_face_encoding = []
total_image_name = []
def connectDB():
"""
    connect to sqlite
"""
conn = sq.connect('data.sqlite')
cursor = conn.cursor()
sqText = "select * from user;"
try:
data = cursor.execute(sqText)
for raw in data:
total_image_name.append(raw[1])
img = np.frombuffer(raw[6], dtype=np.uint8)
height, width = raw[7], raw[8]
img = img.reshape(height, width, 3)
total_face_encoding.append(
face_recognition.face_encodings(img)[0])
except:
pass
def refreshDB():
"""
Refresh Database
"""
global total_face_encoding
global total_image_name
total_face_encoding = []
total_image_name = []
conn = sq.connect('data.sqlite')
cursor = conn.cursor()
sqText = "select * from user;"
data = cursor.execute(sqText)
for raw in data:
total_image_name.append(raw[1])
img = np.frombuffer(raw[6], dtype=np.uint8)
height, width = raw[7], raw[8]
img = img.reshape(height, width, 3)
total_face_encoding.append(
face_recognition.face_encodings(img)[0])
@app.route("/index")
@app.route("/")
def index():
# return the rendered template
global capFlag, name
capFlag = False
name = 'Unknown'
return render_template("index.html")
@app.route("/main")
def main():
"""
match the face and return it's name
"""
global capFlag, name, outputFrame
capFlag = True
return render_template("main.html")
@app.route("/result")
def result():
"""
get the whole data in the database from name
"""
global capFlag, name
capFlag = False
data = getData(name)
if len(data) == 0:
return redirect(url_for('ProcessError'))
return render_template("result.html", data=data)
def faceMatch(frame):
global total_face_encoding
global total_image_name
face_locations = face_recognition.face_locations(frame)
face_encodings = face_recognition.face_encodings(frame, face_locations)
top, right, bottom, left = face_locations[0]
face_encoding = face_encodings[0]
    # Check whether the face matches any of the known faces
print(total_image_name)
for i, v in enumerate(total_face_encoding):
match = face_recognition.compare_faces(
[v], face_encoding, tolerance=0.4)
name = "Unknown"
if match[0]:
name = total_image_name[i]
# print(name)
break
return name
def getData(name):
"""
    look up the record for the given name in data.sqlite
    Params:
        name: the name recognized from a camera frame, used to search the database
    Return:
        data: a dict with the record found for that name in the database
"""
conn = sq.connect('data.sqlite')
cursor = conn.cursor()
sqText = "select * from user where name=?;"
col_names = ['身份证号码', '姓名', '性别', '民族', '生日', '住址']
data_dict = {}
try:
data = cursor.execute(sqText, (name, )).fetchall()[0]
for i in range(6):
data_dict[col_names[i]] = data[i]
except:
pass
conn.close()
return data_dict
def saveFaceImg(name):
"""
    fetch the face image stored for the given name in data.sqlite
    and write it to static/images/face.jpg
    Params:
        name: the name recognized from a camera frame, used to search the database
"""
img_dir = os.path.join(baseDir, 'static')
img_dir = os.path.join(img_dir, 'images')
path = os.path.join(img_dir, 'face.jpg')
conn = sq.connect('data.sqlite')
cursor = conn.cursor()
sqText = "select * from user where name=?;"
raw = cursor.execute(sqText, (name, )).fetchall()[0]
img = np.frombuffer(raw[6], dtype=np.uint8)
height, width = raw[7], raw[8]
img = img.reshape(height, width, 3)
cv2.imwrite(path, img)
# print("save face image to ==> ", path)
conn.close()
@app.route("/PorcessError")
def ProcessError():
return render_template("ProcessError.html")
@app.route("/uploadError")
def uploadError():
return render_template("uploadError.html")
@app.route("/detect")
def detect():
global outputFrame, name, capFlag
if capFlag == False:
return
## open the camera and get a frame
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
if capFlag == False:
break
if ret:
with lock:
                scale_percent = 700  # target output width in pixels
width = scale_percent
height = scale_percent * frame.shape[0] // frame.shape[1]
frame = cv2.resize(frame, (width, height))
"""get the face image from camera"""
outputFrame = frame.copy()
try:
_, x, y, w, h = fc.getFace(frame, debug=True)
cv2.rectangle(outputFrame, (x, y), (x+w, y+h), (0, 0, 255), 2)
if name == 'Unknown':
name = faceMatch(outputFrame)
saveFaceImg(name)
else:
print(name)
# result()
except:
pass
def generate():
global outputFrame, lock, capFlag
t = threading.Thread(target=detect)
t.daemon = True
t.start()
while True:
with lock:
if outputFrame is None:
continue
(temp, encodedImage) = cv2.imencode(".jpg", outputFrame)
if not temp:
continue
yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(encodedImage) + b'\r\n')
@app.route("/video_feed")
def video_feed():
return Response(generate(),
mimetype = "multipart/x-mixed-replace; boundary=frame")
@app.route('/upload_file', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
file = request.files['file']
if file and allowed_file(file.filename):
## save the id card image
fileName = secure_filename(file.filename)
dirName = fileName.split(".")[0]
saveDir = os.path.join(dataBaseDir, dirName)
os.system("mkdir " + saveDir)
savePath = os.path.join(saveDir, fileName)
file.save(savePath)
cardImg = cv2.imread(savePath)
## step2 ocr process get the information of image
try:
text_data = ocr.ocrFully(savePath)
print(text_data)
except:
return redirect(url_for('uploadError'))
## step3 crop the face image
faceImg = fc.getFace(cardImg)
savePath = os.path.join(saveDir, 'face.jpg')
cv2.imwrite(savePath, faceImg)
faceImg = cv2.imread(savePath)
## step4 save text_data and faceImg to data.sqlite
save2DB(text_data, faceImg)
refreshDB()
return redirect(url_for('index')+"#2")
def save2DB(text_data, faceImg):
## step1 connet to database
conn = sq.connect('data.sqlite')
cursor = conn.cursor()
try:
"""
ID: the id card number
name: name in the id card
        sex: '男' (male) or '女' (female)
        nation: ethnic group (e.g. '汉族')
        birth: date of birth (e.g. 1984年10月1日)
        cardImg: the image of the id card in byte form
        cardImgHeight: the height of the image (needed to reshape it)
        cardImgWidth: the width of the image
the example of insert data:
==> sqText = "insert into user(ID, name, sex, nation, birth, address, cardImg, cardImgHeight, cardImgWidth) values(?, ?, ?, ?, ?, ?, ?, ?, ?);"
==> cursor.execute(sqText, ('111111111111111111', '张三', '男', '汉族', '1984年10月1日', '重庆市xxx', img_blob, img.shape[0], img.shape[1]))
"""
cursor.execute('''create table user
(
ID varchar(20) primary key,
name varchar(20),
sex varchar(4),
nation varchar(10),
birth varchar(20),
address varchar(40),
cardImg blob,
cardImgHeight int,
cardImgWidth int
);''')
except:
pass
faceImgBlob = sq.Binary(faceImg)
sqText = "insert into user(ID, name, sex, nation, birth, address, cardImg, cardImgHeight, cardImgWidth) values(?, ?, ?, ?, ?, ?, ?, ?, ?);"
cursor.execute(sqText, (text_data[5], text_data[0], text_data[1], text_data[2], text_data[3], text_data[4], faceImgBlob, faceImg.shape[0], faceImg.shape[1]))
conn.commit()
print("insert done!")
conn.close()
@app.route('/videoprocess', methods=['GET', 'POST'])
def process():
if request.method == 'POST':
return redirect(url_for('main'))
else:
return redirect(url_for('index')+"#2")
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
if __name__ == '__main__':
connectDB()
app.run(host='127.0.0.1', port=8000, debug=False,
threaded=True, use_reloader=False)
|
offboard3.py
|
import rospy
import glob
import json
import math
import os
import px4tools
import sys
import time
import actionlib
import roslaunch
from mavros import mavlink
from mavros import action_server2
from mavros_msgs.msg import Mavlink, Waypoint, WaypointReached, GlobalPositionTarget, State, TakeoffAction, TakeoffGoal, LandAction, LandGoal, WaypointsAction, WaypointsGoal, HomePosition
from mavros_msgs.srv import CommandBool, SetMode, CommandTOL, WaypointPush, WaypointClear, CommandHome
from sensor_msgs.msg import NavSatFix
from geometry_msgs.msg import PoseStamped
from mavros_test_common import MavrosTestCommon
from pymavlink import mavutil
from threading import Thread
# Brings in the SimpleActionClient
import actionlib
class offboard():
def state_callback(self, data):
self.state = data
def wp_reached_callback(self, data):
self.wp_reached = data
def home_pos_callback(self, data):
self.home_pos = data
#print(self.home_pos.geo)
def global_pos_callback(self, data):
self.global_pos = data
def __init__(self):
rospy.init_node('guidance_node', anonymous=True)
state_sub = rospy.Subscriber('mavros/state', State, self.state_callback)
        self.state = State()
#PUBLISHERS
local_pos_pub = rospy.Publisher('mavros/setpoint_position/local', PoseStamped, queue_size=10)
#global_pos_pub = rospy.Publisher('mavros/setpoint_position/global', GlobalPositionTarget, queue_size=10)
local_pos_sub = rospy.Subscriber('/mavros/global_position/global', NavSatFix, self.global_pos_callback)
home_pos_sub = rospy.Subscriber('/mavros/home_position/home', HomePosition, self.home_pos_callback)
#ACTIONS
#init actionlib servers
server = Thread(target=action_server2.ActionServer)
server.setDaemon(True)
server.start()
takeoff_client = actionlib.SimpleActionClient('takeoff', TakeoffAction)
land_client = actionlib.SimpleActionClient('land', LandAction)
waypoints_client = actionlib.SimpleActionClient('waypoints', WaypointsAction)
# need to simulate heartbeat to prevent datalink loss detection
hb_mav_msg = mavutil.mavlink.MAVLink_heartbeat_message(mavutil.mavlink.MAV_TYPE_GCS, 0, 0, 0, 0, 0)
hb_mav_msg.pack(mavutil.mavlink.MAVLink('', 2, 1))
hb_ros_msg = mavlink.convert_to_rosmsg(hb_mav_msg)
        hb_thread = Thread(target=self.send_heartbeat, args=(hb_ros_msg,))
hb_thread.setDaemon(True)
#PREFLIGHT CHECK
rate = rospy.Rate(30)
while (not self.state.connected):
print('Waiting on Connection')
rate.sleep()
print('Connected')
time.sleep(5)
goal = TakeoffGoal()
goal.height = 10
print('Actionlib started')
takeoff_client.send_goal(goal)
takeoff_client.wait_for_result()
wps = []
wp1 = Waypoint()
wp2 = Waypoint()
wp3 = Waypoint()
wp1.command = 16
wp1.x_lat = self.home_pos.geo.latitude - 0.00050
wp1.y_long = self.home_pos.geo.longitude
wp1.z_alt = 10
wp1.autocontinue = True
wp2.command = 16
wp2.x_lat = self.home_pos.geo.latitude - 0.00050
wp2.y_long = self.home_pos.geo.longitude - 0.00050
wp2.z_alt = 10
wp2.autocontinue = True
wp3.command = 16
wp3.x_lat = self.home_pos.geo.latitude
wp3.y_long = self.home_pos.geo.longitude
wp3.z_alt = 10
wp3.autocontinue = True
goal = WaypointsGoal()
goal.waypoints.append(wp1)
goal.waypoints.append(wp2)
goal.waypoints.append(wp3)
print(goal)
waypoints_client.send_goal(goal)
waypoints_client.wait_for_result(rospy.Duration.from_sec(45.0))
time.sleep(5)
goal = LandGoal()
goal.x_lat = self.home_pos.geo.latitude
goal.y_long = self.home_pos.geo.longitude
goal.z_alt = 0.0
print('Actionlib started')
land_client.send_goal(goal)
land_client.wait_for_result(rospy.Duration.from_sec(30.0))
sys.exit()
# Heartbeat must be sent to px4 at 2Hz or else auto disconnect
def send_heartbeat(self, hb_ros_msg):
rate = rospy.Rate(2) # Hz
while not rospy.is_shutdown():
self.mavlink_pub.publish(hb_ros_msg)
try: # prevent garbage in console output when thread is killed
rate.sleep()
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
offboard()
|
multicpu_copy.py
|
import os
from shutil import copyfile
import sqlite3
from PIL import Image
import uuid
from multiprocessing import Process
def do_chunk(pid,todo):
out= open("all_detections_"+str(pid)+".csv","w")
co=0
for line in todo:
try:
dest= os.path.join(root,line[2].replace(" ","_"))
src= os.path.join('/datadrive0/emammal',line[1])
image = Image.open(src)
dpi = 100
s = image.size; imageHeight = s[1]; imageWidth = s[0]
figsize = imageWidth / float(dpi), imageHeight / float(dpi)
topRel = float(line[4])
leftRel = float(line[5])
bottomRel = float(line[6])
rightRel = float(line[7])
unq_id= "crops_"+str(uuid.uuid1())
#print(line,imageWidth,imageHeight)
print("%s,%s,%s,%0.6f,%0.6f,%0.6f,%0.6f,%0.6f"%(unq_id, line[0],line[2],float(line[3]),topRel,leftRel,bottomRel,rightRel), file=out)
x1 = int(leftRel * imageWidth)
y1 = int(topRel * imageHeight)
x2 = int(rightRel* imageWidth)
y2 = int(bottomRel * imageHeight)
crop= image.crop((x1,y1,x2,y2)).resize((256,256),Image.BILINEAR)
if not os.path.exists(dest):
os.mkdir(dest)
crop.save(os.path.join(dest,unq_id+".JPG"))
image.close()
co+=1
if co%1000==0:
print(pid,co)
except:
pass
out.close()
def divide(t,n,i):
length=t/(n+0.0)
#print length,(i-1)*length,i*length
return int(round((i-1)*length)),int(round(i*length))
conn = sqlite3.connect('emammal.db')
c = conn.cursor()
root= 'all_crops/emammal_crops'
images_folder='crops'
c.execute('SELECT * FROM detections where label in (select label from species)')
rows = c.fetchall()
os.mkdir(root)
total_records=len(rows)
total_processors=12
print(total_records)
for i in range(1,total_processors+1):
st,ln=divide(total_records,total_processors,i)
p1 = Process(target=do_chunk, args=(i,rows[st:ln]))
p1.start()
|
tl.py
|
#!/usr/bin/env python
import argparse
import json
import traceback
import logging
import os
import signal
import socket
import sys
import threading
import time
import shutil
import select
import fnmatch
import subprocess
import yaml
##############
# dependencies
# pip install pyyaml
# pip install simplejson
# pip install requests
# pip install psutil
#
# windows also:
# pip install pypiwin32
proj_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(proj_root)
import test_looper.data_model.TestDefinition as TestDefinition
import test_looper.data_model.TestDefinitionResolver as TestDefinitionResolver
import test_looper.core.tools.Git as Git
import test_looper.core.Config as Config
import test_looper.worker.WorkerState as WorkerState
import test_looper.core.SubprocessRunner as SubprocessRunner
if sys.platform != "win32":
import test_looper.core.tools.Docker as Docker
else:
Docker = None
import win32file
FILE_ATTRIBUTE_REPARSE_POINT = 1024
    # Combined attribute mask: a directory whose reparse-point bit is set (i.e. a junction or directory symlink).
REPARSE_FOLDER = (win32file.FILE_ATTRIBUTE_DIRECTORY | FILE_ATTRIBUTE_REPARSE_POINT)
if os.getenv("TESTLOOPER_AWS_CREDS"):
try:
with open(os.getenv("TESTLOOPER_AWS_CREDS"), "r") as f:
creds = json.loads(f.read())
os.environ["AWS_ACCESS_KEY_ID"] = str(creds["access_key_id"])
os.environ["AWS_SECRET_ACCESS_KEY"] = str(creds["secret_access_key"])
os.environ["AWS_SESSION_TOKEN"] = str(creds["session_token"])
except:
print "WARNING: couldn't read credentials from ", os.getenv("TESTLOOPER_AWS_CREDS")
ROOT_CHECKOUT_NAME = "__root"
def printGrid(grid):
grid = [[str(cell) for cell in row] for row in grid]
col_count = max([len(row) for row in grid])
gridWidths = []
for col in xrange(col_count):
gridWidths.append(max([len(grid[row][col]) if col < len(grid[row]) else 0 for row in xrange(len(grid))]))
grid = grid[:1] + [["-" * g for g in gridWidths]] + grid[1:]
rows = []
for row in grid:
fmt = " ".join(["%-" + str(gridWidths[col]) + "s" for col in xrange(len(row))])
rows.append(fmt % tuple(row))
print "\n".join(rows) + "\n",
def configureLogging(verbose=False):
loglevel = logging.INFO if verbose else logging.WARN
logging.getLogger().setLevel(loglevel)
for handler in logging.getLogger().handlers:
handler.setLevel(loglevel)
handler.setFormatter(
logging.Formatter(
'%(asctime)s %(levelname)s %(filename)s:%(lineno)s@%(funcName)s %(name)s - %(message)s'
)
)
def createArgumentParser():
parser = argparse.ArgumentParser(
description="Checkout multi-repo projects locally and run tests using docker"
)
parser.add_argument("-v",
"--verbose",
dest="verbose",
default=False,
action='store_true',
help="Set logging level to verbose")
subparsers = parser.add_subparsers()
init_parser = subparsers.add_parser("init", help="Initialize a testlooper checkout")
init_parser.set_defaults(command="init")
init_parser.add_argument("path", help="Path to disk storage")
init_parser.add_argument("git_clone_root", help="Git clone root (e.g. [email protected])")
init_parser.add_argument("-i", "--ignore", help="Any repos with these strings at the start are by convention not displayed in outputs", nargs='*')
init_parser.add_argument("-s", "--strip", help="These strings are by default stripped from the front of reponames", nargs='*')
init_parser.add_argument("--repos", help="store source repos somewhere other than '.tl/repos'", default=None)
fetch_parser = subparsers.add_parser("fetch", help="Run 'git fetch origin' on all the repos we know about")
fetch_parser.set_defaults(command="fetch")
fetch_parser.add_argument("--all", help="Run fetch on all repos (even hidden ones)", default=False, action="store_true")
status_parser = subparsers.add_parser("status", help="Show currently referenced repos and changed files.")
status_parser.set_defaults(command="status")
status_parser.add_argument("--all", help="Show all repos (even hidden ones)", default=False, action="store_true")
checkout_parser = subparsers.add_parser("checkout", help="Checkout a given repo/commit into 'src/src' and grab dependencies")
checkout_parser.set_defaults(command="checkout")
checkout_parser.add_argument("repo", help="Name of the repo")
checkout_parser.add_argument("committish", help="Name of the commit or branch")
checkout_parser.add_argument("--hard", help="Force a hard reset in the source repo (and all dependent source repos)", default=False, action="store_true")
checkout_parser.add_argument("--prune", help="Get rid of unused repos", default=False, action="store_true")
checkout_parser.add_argument("--from", help="Create a new branch, based on this one", dest="from_name", default=None)
checkout_parser.add_argument("--orphan", help="Create a new orphaned branch, based on this one", dest="orphan", default=False, action='store_true')
run_parser = subparsers.add_parser("run", help="Run a build or test.")
run_parser.set_defaults(command="run")
run_parser.add_argument("testpattern", help="Name of the test (with globs)")
if sys.platform != "win32":
        #no reason to have 'interactive' on windows - we're already interactive
        #because there's no docker involved
run_parser.add_argument("-i", "--interactive", dest="interactive", default=False, help="Drop into an interactive terminal for this.", action="store_true")
else:
run_parser.set_defaults(interactive=False)
run_parser.add_argument("-c", dest="cmd", default=None, help="Just run this one command, instead of the full build")
run_parser.add_argument("-d", "--nodeps", dest="nodeps", default=False, help="Don't build dependencies, just this one. ", action="store_true")
run_parser.add_argument("-s", "--nologcapture", dest="nologcapture", default=False, help="Don't capture logs - show everything directly", action="store_true")
run_parser.add_argument("-j",
"--cores",
dest="cores",
default=1,
type=int,
help="Number of cores to expose"
)
info_parser = subparsers.add_parser("info", help="Get info on a particular test or group of tests.")
info_parser.set_defaults(command="info")
info_parser.add_argument("testpattern", help="Subset of tests to look at in particular", default=[], nargs='*')
info_parser.add_argument("-d", "--detail", help="Dump full test detail, not just the name", default=False, action="store_true")
info_parser.add_argument("--include_disabled", help="Show all tests and builds including disabled ones", default=False, action="store_true")
info_parser.add_argument("--all", help="Show all repos (even hidden ones) when displaying dependencies", default=False, action="store_true")
return parser
def loadConfiguration(configFile):
with open(configFile, 'r') as fin:
expanded = os.path.expandvars(fin.read())
return json.loads(expanded)
def find_cur_root(path):
path = os.path.abspath(path)
while True:
if os.path.exists(os.path.join(path, ".tl")):
return path
subpath = os.path.dirname(path)
if subpath == path:
return None
path = subpath
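# Rough illustration (paths are hypothetical): if /home/me/work/.tl exists,
# find_cur_root("/home/me/work/project/sub") returns "/home/me/work", and
# None is returned once the filesystem root is reached without finding ".tl".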
def run_init(args):
root = os.path.abspath(args.path)
curRoot = find_cur_root(root)
if curRoot:
raise UserWarning("Can't initialize a tl directory here. There is already one at\n\n%s" % curRoot)
if not os.path.exists(root):
os.makedirs(root)
os.mkdir(os.path.join(root, ".tl"))
config_file = os.path.join(root, ".tl", "config.yml")
with open(config_file, "w") as f:
f.write(
yaml.dump(
{"git_clone_root": args.git_clone_root,
"repo_path_override": args.repos,
"repo_prefixes_to_ignore": args.ignore,
"repo_prefixes_to_strip": args.strip
},
indent=4,
default_style='')
)
class DummyArtifactStorage(object):
def __init__(self):
object.__init__(self)
@staticmethod
def sanitizeName(name):
return name.replace("_", "_u_").replace("/","_s_").replace("\\", "_bs_").replace(":","_c_").replace(" ","_sp_")
def upload_build(self, testHash, key_name, file_name):
pass
def build_exists(self, testHash, key_name):
pass
def uploadSingleTestArtifact(self, testHash, testId, artifact_name, path):
pass
def uploadIndividualTestArtifacts(self, testHash, testId, pathsToUpload, logger=None):
pass
def uploadTestArtifacts(self, *args, **kwargs):
pass
class WorkerStateOverride(WorkerState.WorkerState):
def __init__(self, name_prefix, worker_directory, looperCtl, cores):
hwConfig = Config.HardwareConfig(cores=cores, ram_gb=8)
image_repo = os.getenv("TESTLOOPER_DOCKER_IMAGE_REPO") or None
WorkerState.WorkerState.__init__(self, name_prefix, worker_directory, None, DummyArtifactStorage(), "machine", hwConfig, docker_image_repo=image_repo)
self.looperCtl = looperCtl
self.extra_mappings = {}
self.resolver = TestDefinitionResolverOverride(looperCtl, None)
def wants_to_run_cleanup(self):
return False
def getRepoCacheByName(self, name):
return self.looperCtl.getGitRepo(name)
def resetToCommitInDir(self, repoName, commitHash, targetDir):
assert False
def cleanup(self):
if Docker is not None:
Docker.DockerImage.removeDanglingDockerImages()
#don't remove everything!
self.clearDirectoryAsRoot(
self.directories.scratch_dir,
#self.directories.test_inputs_dir,
self.directories.command_dir
)
else:
self.clearDirectoryAsRoot(self.directories.command_dir)
def volumesToExpose(self):
res = {
self.directories.scratch_dir: "/test_looper/scratch",
self.directories.test_output_dir: "/test_looper/output",
self.directories.build_output_dir: "/test_looper/build_output",
self.directories.test_inputs_dir: "/test_looper/test_inputs",
self.directories.ccache_dir: "/test_looper/ccache",
self.directories.command_dir: "/test_looper/command"
}
res.update(self.extra_mappings)
return res
def _upload_artifact(self, *args, **kwargs):
return True
def resetToCommit(self, repoName, commitHash):
self.extra_mappings[self.looperCtl.checkout_root_path(repoName)] = self.exposeAsDir("src")
return True
def exposeAsDir(self, expose_as):
if sys.platform == "win32":
return os.path.join(self.worker_directory, expose_as)
else:
assert expose_as.startswith("test_inputs/")
tgt = os.path.join("/test_looper/mountpoints", expose_as[len("test_inputs/"):])
target_linkpoint = os.path.join(self.directories.test_inputs_dir, expose_as[len("test_inputs/"):])
if not os.path.exists(os.path.dirname(target_linkpoint)):
os.makedirs(os.path.dirname(target_linkpoint))
if os.path.islink(target_linkpoint):
os.unlink(target_linkpoint)
os.symlink(tgt, target_linkpoint)
return tgt
def grabDependency(self, log_function, expose_as, dep, worker_callback):
if dep.matches.Build:
self.extra_mappings[
os.path.join(self.looperCtl.build_path(dep.name), "build_output")
] = self.exposeAsDir(expose_as)
return None
if dep.matches.Source:
self.extra_mappings[
self.looperCtl.checkout_root_path(dep.repo)
] = self.exposeAsDir(expose_as)
return None
return "Unknown dependency type: %s" % dep
def _windows_prerun_command(self):
def islink(fpath):
""" Windows islink implementation. """
if win32file.GetFileAttributes(fpath) & REPARSE_FOLDER == REPARSE_FOLDER:
return True
return False
def walkToSymlinks(dir):
if islink(dir):
os.rmdir(dir)
else:
for p in os.listdir(dir):
if not os.path.isdir(os.path.join(dir,p)):
os.unlink(os.path.join(dir,p))
else:
walkToSymlinks(os.path.join(dir,p))
os.rmdir(dir)
walkToSymlinks(self.directories.test_inputs_dir)
if os.path.exists(self.directories.repo_copy_dir):
os.rmdir(self.directories.repo_copy_dir)
for k,v in self.extra_mappings.iteritems():
if not os.path.exists(os.path.dirname(v)):
os.makedirs(os.path.dirname(v))
args = ["New-Item", "-Path", v, "-ItemType", "Junction", "-Value", k]
running_subprocess = subprocess.Popen(
["powershell.exe", "-ExecutionPolicy", "Bypass"] + args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
#env=env_to_pass,
#creationflags=subprocess.CREATE_NEW_CONSOLE
)
running_subprocess.wait()
class TestDefinitionResolverOverride(TestDefinitionResolver.TestDefinitionResolver):
def __init__(self, looperCtl, visitRepoRef):
TestDefinitionResolver.TestDefinitionResolver.__init__(self, looperCtl.getGitRepo)
self.looperCtl = looperCtl
self.visitRepoRef = visitRepoRef
def resolveRefWithinRepo(self, curRepoName, nameOfRef, actualRef):
"""
Allows subclasses to modify how we name repositories.
curRepoName - the name of the repo we're currently parsing
nameOfRef - the name of the reference within the testDefinitions file
actualRef - the RepoReference (not an Import) we're processing.
"""
path = ":".join(curRepoName.split(":")[:-1] + [nameOfRef])
actualRepoName = actualRef.reponame().split(":")[-1]
actualHash = actualRef.commitHash()
res = actualRef._withReplacement(reference=path + ":" + actualRepoName + "/" + actualHash)
if self.visitRepoRef:
self.visitRepoRef(res)
return res
def getRepoContentsAtPath(self, repoName, commitHash, path):
root_path = self.looperCtl.checkout_root_path(repoName)
if os.path.exists(root_path):
final_path = os.path.join(root_path, path)
if not os.path.exists(final_path):
return None
else:
return open(final_path, "r").read()
return None
def testDefinitionTextAndExtensionFor(self, repoName, commitHash):
root_path = self.looperCtl.checkout_root_path(repoName)
if os.path.exists(root_path):
#we have this checked out already, and want to use the local version of it
path = Git.Git.getTestDefinitionsPathFromDir(root_path)
if not path:
return None
text = open(os.path.join(root_path, path), "r").read()
return text, os.path.splitext(path)[1], path
return None
class TestLooperCtl:
def __init__(self, root_path):
self.root_path = root_path
self.repos = {}
self.path_to_repos = None
self._loadConfig()
self._loadState()
self.initializeAllRepoNames()
def _loadState(self):
try:
state_file_path = os.path.join(self.root_path, ".tl", "state.yml")
if not os.path.exists(state_file_path):
self.checkout_root = (None, None)
return
with open(state_file_path, "r") as f:
state = yaml.load(f.read())
#repo, branch (or None), commit
self.checkout_root = state.get("checkout_root", (None, None))
except Exception as e:
raise UserWarning("Corrupt state file: " + str(e))
def _loadConfig(self):
try:
config_file_path = os.path.join(self.root_path, ".tl", "config.yml")
with open(config_file_path, "r") as f:
config = yaml.load(f.read())
self.repo_prefixes_to_strip = config.get("repo_prefixes_to_strip", [])
self.repo_prefixes_to_ignore = config.get("repo_prefixes_to_ignore", [])
self.path_to_repos = config.get("repo_path_override", None) or os.path.join(self.root_path, ".tl", "repos")
self.git_clone_root = config["git_clone_root"]
except Exception as e:
raise UserWarning("Corrupt config file: " + str(e))
def repoShouldBeDisplayed(self, repo):
repo = repo.split(":")[-1]
for r in self.repo_prefixes_to_ignore:
if repo.startswith(r):
return False
return True
def repoShortname(self, repo):
if repo == ROOT_CHECKOUT_NAME:
return ""
prunes = [p for p in self.repo_prefixes_to_strip if repo.startswith(p)]
prunes = sorted(prunes, key=len)
if prunes:
return repo[len(prunes[-1]):]
return repo
def initializeAllRepoNames(self):
self.allRepoNames = set()
def walk(items):
dirpath = os.path.join(self.path_to_repos, *items)
if os.path.exists(dirpath):
for i in os.listdir(dirpath):
fullpath = os.path.join(dirpath, i)
if i != ".git" and os.path.isdir(fullpath):
if os.path.exists(os.path.join(fullpath, ".git")):
self.allRepoNames.add("/".join(items + (i,)))
else:
walk(items + (i,))
walk(())
def fetch(self, args):
threads = []
for reponame in self.allRepoNames:
if args.all or self.repoShouldBeDisplayed(reponame):
def makeUpdater(name):
def f():
try:
self.getGitRepo(name).fetchOrigin()
except:
logging.error("Failed to update repo %s: %s", name, traceback.format_exc())
return f
threads.append(threading.Thread(target=makeUpdater(reponame)))
threads[-1].daemon=True
threads[-1].start()
print "fetching origin for ", len(threads), " repos..."
for t in threads:
t.join()
def sanitize(self, name):
return name.replace("/","_").replace(":","_").replace("~", "--")
def build_path(self, buildname):
return os.path.abspath(os.path.join(self.root_path, "builds", self.sanitize(buildname)))
def checkout_root_path(self, reponame):
"""Return the checkout location of a given repo.
        The reponame is actually an encoding of the repo's name and the path (of
        repo-variables) that leads us here from the root checkout. That is,
        if the root test definition refers to hash "H1" in repo "MyRepo" as "my_repo_src",
the reponame will be encoded as
my_repo_src:MyRepo
if my_repo_src ends up referring to MySubRepo, that would be encoded as
my_repo_src:my_subrepo_src:MySubRepo
The root commit is always encoded as ROOT_CHECKOUT_NAME and mapped to 'src'.
"""
        if reponame == ROOT_CHECKOUT_NAME:
return os.path.join(self.root_path, "src", "src")
path = ".".join(reponame.split(":")[:-1])
if not self.repoShouldBeDisplayed(reponame):
return os.path.join(self.root_path, "hidden", path)
return os.path.join(self.root_path, "src", path)
def writeState(self):
config = { "checkout_root": self.checkout_root }
with open(os.path.join(self.root_path, ".tl", "state.yml"), "w") as f:
f.write(yaml.dump(config, indent=4, default_style=''))
def getGitRepo(self, reponame):
if reponame in self.repos:
return self.repos[reponame]
self.repos[reponame] = Git.Git(os.path.join(*([self.path_to_repos] + reponame.split("/"))))
self.allRepoNames.add(reponame)
if not self.repos[reponame].isInitialized():
if not os.path.exists(self.repos[reponame].path_to_repo):
os.makedirs(self.repos[reponame].path_to_repo)
clone_root = self.git_clone_root + ":" + reponame + ".git"
if not self.repos[reponame].cloneFrom(clone_root):
del self.repos[reponame]
if os.path.exists(clone_root):
shutil.rmtree(clone_root)
return None
else:
print "Cloned " + clone_root + " into " + self.repos[reponame].path_to_repo
return self.repos[reponame]
def clearDirectoryAsRoot(self, *args):
image = Docker.DockerImage("ubuntu:16.04")
image.run(
"rm -rf " + " ".join(["%s/*" % p for p in args]),
volumes={a:a for a in args},
options="--rm"
)
def clear(self, args):
if args.src:
self.repo_and_hash_to_branch = {}
self.clearDirectoryAsRoot(os.path.join(self.root_path, "src"))
if args.build:
self.clearDirectoryAsRoot(os.path.join(self.root_path, "builds"))
    def createNewBranchAndPush(self, repo, branchname, from_name):
        if from_name:
            hash = repo.gitCommitData("origin/" + from_name)[0]
        else:
            hash = repo.createInitialCommit()
repo.pushCommit(hash, branchname, force=False, createBranch=True)
def setCurCheckoutRoot(self, args):
reponame = self.bestRepo(args.repo)
repo = self.getGitRepo(reponame)
if repo is None:
for path in self.repo_prefixes_to_strip:
repo = self.getGitRepo(path + reponame)
if repo:
break
if not repo:
raise UserWarning("Can't find repo %s" % reponame)
committish = args.committish
if not Git.isShaHash(committish) or not repo.commitExists(committish):
#make sure we have up-to-date branch info
repo.fetchOrigin()
if args.from_name:
self.createNewBranchAndPush(repo, committish, args.from_name)
elif args.orphan:
self.createNewBranchAndPush(repo, committish, None)
if Git.isShaHash(committish):
commitHash = committish
else:
commitHash = repo.gitCommitData("origin/" + committish)[0]
print "Checking out", reponame, commitHash
self.checkout_root = (reponame, commitHash)
def checkout(self, args):
self.setCurCheckoutRoot(args)
#make sure our root checkout (src/src) is populated
self._updateRepoCheckout(ROOT_CHECKOUT_NAME, self.checkout_root[1], args.hard)
paths_visited = set()
def visitRepoRef(ref):
path = self.checkout_root_path(ref.reponame())
if path in paths_visited:
return
else:
paths_visited.add(path)
self._updateRepoCheckout(ref.reponame(), ref.commitHash(), args.hard)
#walk all the repo definitions and make sure everything is up-to-date
resolver = TestDefinitionResolverOverride(self, visitRepoRef)
resolver.testEnvironmentAndRepoDefinitionsFor(ROOT_CHECKOUT_NAME, "HEAD")
if args.prune:
all_paths = self.allCheckoutPaths()
for path in all_paths:
if path not in paths_visited:
print "Removing", path
try:
shutil.rmtree(path)
except:
traceback.print_exc()
def allCheckoutPaths(self):
res = []
for dirname in os.listdir(os.path.join(self.root_path,"src")):
if dirname != "src":
res.append(os.path.join(self.root_path,"src",dirname))
for dirname in os.listdir(os.path.join(self.root_path,"hidden")):
res.append(os.path.join(self.root_path,"hidden",dirname))
return res
def _updateRepoCheckout(self, reponame, hash, hard=False):
if reponame == ROOT_CHECKOUT_NAME:
actualRepoName = self.checkout_root[0]
else:
actualRepoName = reponame.split(":")[-1]
path = self.checkout_root_path(reponame)
repo = Git.Git(path)
if not repo.isInitialized():
print "Checking out ", hash, " from ", actualRepoName, " to ", path
self.getGitRepo(actualRepoName).resetToCommitInDirectory(hash, path)
else:
if repo.getSourceRepoName("origin") != actualRepoName:
if repo.currentFileNumStat():
print "Repo reference for ", "/".join(reponame.split(":")[:-1]), "changed from ", repo.getSourceRepoName("origin"), "to", actualRepoName
print
print "You have outstanding changes. Please remove them before continuing."
os._exit(1)
else:
print "Repo reference for ", "/".join(reponame.split(":")[:-1]), "changed from ", repo.getSourceRepoName("origin"), "to", actualRepoName
print "No files are modified so we're replacing the directory."
shutil.rmtree(path)
self.getGitRepo(actualRepoName).resetToCommitInDirectory(hash, path)
if hash != repo.currentCheckedOutCommit() or hard:
if hard:
repo.resetHard()
print "Checkout commit ", hash, " to ", path, " currently at ", Git.Git(path).currentCheckedOutCommit()
repo.checkoutCommit(hash)
if repo.currentCheckedOutCommit() != hash:
print "Fetching origin for ", path
repo.fetchOrigin()
repo.checkoutCommit(hash)
if repo.currentCheckedOutCommit() != hash:
print "Failed to checkout ", hash, " into ", path
if repo.currentFileNumStat():
print "You have outstanding changes that are conflicting with the checkout."
os._exit(1)
def allTestsAndBuildsByName(self):
visited = set()
def visitRepoRef(ref):
path = self.checkout_root_path(ref.reponame())
if path in visited:
return
else:
visited.add(path)
repo = Git.Git(path)
if not repo.isInitialized():
self._updateRepoCheckout(ref.reponame(), ref.commitHash())
resolver = TestDefinitionResolverOverride(self, visitRepoRef)
resolver.testEnvironmentAndRepoDefinitionsFor(ROOT_CHECKOUT_NAME, "HEAD")
allTestsByName = {}
for (repo, hash), testDict in resolver.testDefinitionCache.iteritems():
for testName, testDefinition in testDict.iteritems():
if repo == ROOT_CHECKOUT_NAME:
allTestsByName[testName] = (testDefinition, repo)
else:
repoName = "/".join(repo.split(":")[:-1])
allTestsByName[repoName + "/" + testName] = (testDefinition, repo)
return allTestsByName
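    # Naming sketch (repo names are hypothetical): a build "all" defined in the
    # root checkout is keyed simply as "all", while the same name defined in a
    # dependency checked out as "dep_src" is keyed as "dep_src/all".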
def artifactsInTestDef(self, testDef):
return [a.name for stage in testDef.stages for a in stage.artifacts]
def run(self, args):
if args.interactive:
if not args.nodeps:
print "Interactive implies no dependencies."
if not args.nologcapture:
print "Interactive implies nologcapture"
args.nologcapture = True
args.nodeps = True
if args.cmd:
args.nodeps = True
args.nologcapture = True
all_tests = self.allTestsAndBuildsByName()
possible_tests = {t:all_tests[t] for t in all_tests if fnmatch.fnmatchcase(t, args.testpattern)}
if not possible_tests:
print "Can't find a test or build matching pattern '%s' amongst " % args.testpattern
for test in sorted(all_tests):
print " " + test
else:
if args.cmd and len(possible_tests) != 1:
raise UserWarning("Explicit commands can only be passed to one target.")
buildToArtifactsNeeded = {}
for test in sorted(possible_tests):
testDef = possible_tests[test][0]
if testDef.matches.Build:
buildToArtifactsNeeded[testDef.name] = self.artifactsInTestDef(testDef)
for test in sorted(possible_tests):
self.walkGraphAndFillOutTestArtifacts(all_tests, possible_tests[test][0], buildToArtifactsNeeded)
for test in sorted(possible_tests):
self.runBuildOrTest(all_tests, possible_tests[test][1], possible_tests[test][0], args.cores, args.nologcapture, args.nodeps, args.interactive, set(), args.cmd, buildToArtifactsNeeded)
def walkGraphAndFillOutTestArtifacts(self, all_tests, testDef, buildToArtifactsNeeded, seen_already=None):
"""Walk all the dependent tests needed by 'testDef' and get a list of the artifacts we really need to build."""
if not seen_already:
seen_already = set()
path = self.build_path(testDef.name)
if path in seen_already:
return
seen_already.add(path)
for depname, dep in testDef.dependencies.iteritems():
if dep.matches.Build:
test_and_repo = None
for t in all_tests:
if all_tests[t][0].hash == dep.buildHash:
test_and_repo = all_tests[t]
if test_and_repo:
subdef, subrepo = test_and_repo
if subdef.name not in buildToArtifactsNeeded:
buildToArtifactsNeeded[subdef.name] = []
if dep.artifact not in buildToArtifactsNeeded[subdef.name]:
buildToArtifactsNeeded[subdef.name].append(dep.artifact)
self.walkGraphAndFillOutTestArtifacts(all_tests, subdef, buildToArtifactsNeeded, seen_already)
def runBuildOrTest(self, all_tests, reponame, testDef, cores, nologcapture, nodeps, interactive, seen_already, explicit_cmd=None, artifactSubsetByBuildName=None):
#walk all the repo definitions and make sure everything is up-to-date
path = self.build_path(testDef.name)
if path in seen_already:
return True
seen_already.add(path)
if not nodeps:
for depname, dep in testDef.dependencies.iteritems():
if dep.matches.Build:
test_and_repo = None
for t in all_tests:
if all_tests[t][0].hash == dep.buildHash:
test_and_repo = all_tests[t]
if test_and_repo:
subdef, subrepo = test_and_repo
if not self.runBuildOrTest(all_tests, subrepo, subdef, cores, nologcapture, nodeps, interactive, seen_already, artifactSubsetByBuildName=artifactSubsetByBuildName):
print "Dependent build ", self.repoShortname(subrepo.split(":")[-1]), subdef.name, " failed"
return False
print "Building", self.repoShortname(reponame.split(":")[-1]), testDef.name
artifactsNeeded = None
if testDef.matches.Build:
artifactsDefined = self.artifactsInTestDef(testDef)
artifactsRequested = artifactSubsetByBuildName[testDef.name]
#determine if we just want to run a subset of the stages in the build.
if artifactsDefined and set(artifactsRequested) != set(artifactsDefined) and artifactsDefined[-1] not in artifactsRequested:
print "\tOnly building until we've produced the following: ", artifactSubsetByBuildName[testDef.name]
artifactsNeeded = artifactSubsetByBuildName[testDef.name]
worker_state = WorkerStateOverride("test_looper_interactive_", path, self, cores)
if nologcapture:
logfile = sys.stdout
else:
logfile_dir = os.path.join(path, "logs")
worker_state.ensureDirectoryExists(logfile_dir)
t = time.gmtime()
log_path = os.path.join(logfile_dir, "Log-%s-%s-%s-%s-%s-%s.txt" % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec))
logfile = open(log_path, "w")
print "\tlogging output to ", log_path
if not interactive:
class Callbacks:
def __init__(self):
self.t0 = time.time()
self.total_lines = 0
def recordArtifactUploaded(self, artifact):
pass
def heartbeat(self, logMessage=None):
if logMessage:
logfile.write(logMessage)
self.total_lines += logMessage.count("\n")
if time.time() - self.t0 > 10 and not nologcapture:
print "\t", time.asctime(), " - ", self.total_lines, " logged"
self.t0 = time.time()
logfile.flush()
def terminalOutput(self, output):
pass
def subscribeToTerminalInput(self, callback):
pass
callbacks = Callbacks()
else:
callbacks = WorkerState.DummyWorkerCallbacks(localTerminal=True)
def onStageFinished(artifact):
print "\tFinished producing artifact", artifact
if artifactsNeeded is not None:
if artifact in artifactsNeeded:
artifactsNeeded.remove(artifact)
if not artifactsNeeded:
#condition for early stopping
print "Stopping build early after artifact", artifact,"completed."
return True
callbacks.recordArtifactUploaded = onStageFinished
if not worker_state.runTest("interactive", callbacks, testDef, interactive, command_override=explicit_cmd)[0]:
print "Build failed. Exiting."
return False
return True
def info(self, args):
byName = self.allTestsAndBuildsByName()
if args.detail:
for t in sorted(byName):
if any([fnmatch.fnmatchcase(t, p) for p in args.testpattern]) or not args.testpattern:
self.infoForTest(t, byName[t][0], args.all)
else:
grid = [["Name","Type","Project","Configuration","Environment"] + (["Disabled"] if args.include_disabled else [])]
for t in sorted(byName):
if args.include_disabled or not byName[t][0].disabled:
if any([fnmatch.fnmatchcase(t, p) for p in args.testpattern]) or not args.testpattern:
grid.append([t, byName[t][0]._which, byName[t][0].project, byName[t][0].configuration, byName[t][0].environment_name]
+ ([byName[t][0].disabled] if args.include_disabled else []))
printGrid(grid)
def infoForTest(self, test, testDef, showAll):
print test
print " type: ", testDef._which
print " dependencies: "
allDeps = dict(testDef.environment.dependencies)
allDeps.update(testDef.dependencies)
for depname, dep in sorted(allDeps.iteritems()):
if dep.matches.InternalBuild:
print " " + depname + ":", dep.name
elif dep.matches.ExternalBuild:
if self.repoShouldBeDisplayed(dep.repo) or showAll:
print " " + depname + ":", self.repoShortname(dep.repo) + ", commit=" + dep.commitHash + ", name=" + dep.name
elif dep.matches.Source:
if self.repoShouldBeDisplayed(dep.repo) or showAll:
print " " + depname + ":", self.repoShortname(dep.repo) + "/" + dep.commitHash
elif dep.matches.Build:
if self.repoShouldBeDisplayed(dep.repo) or showAll:
print " " + depname + ":", self.repoShortname(dep.repo) + ", hash=" + dep.buildHash, ", name=", dep.name
else:
print " " + depname + ":", "(unknown!!)", repr(dep)
def kvprint(key, value, indent):
if isinstance(value, str) and "\n" in value:
print indent + key + ": |"
print "\n".join([indent + " " + x for x in value.split("\n")])
else:
print indent + key + ":" + repr(value)
print " variables: "
for var, varval in sorted(testDef.variables.iteritems()):
kvprint(var, varval, " ")
toPrint = [
"name", "hash","environment_name","configuration","project",
"timeout", "max_cores","min_cores", "min_ram_gb"
]
for key in toPrint:
kvprint(key, getattr(testDef, key), " ")
print " stages:"
stage_ix = 0
for stage in testDef.stages:
print " stage %s:" % stage_ix
for key in ["order","command","cleanup"]:
kvprint(key, getattr(stage, key), " ")
if stage.artifacts:
print " artifacts:"
for artifact in stage.artifacts:
print " " + artifact.name + ":"
kvprint("directory", artifact.name, " ")
kvprint("include_patterns", str(artifact.include_patterns), " ")
kvprint("exclude_patterns", str(artifact.exclude_patterns), " ")
kvprint("format", str(artifact.format), " ")
def infoForRepo(self, reponame):
reponame = self.bestRepo(reponame)
print "repo: ", self.repoShortname(reponame)
git_repo = self.getGitRepo(reponame)
for branchname, sha_hash in git_repo.listBranchesForRemote("origin").iteritems():
print "\t", branchname, " -> ", sha_hash
for branchname in self.branchesCheckedOutForRepo(reponame):
print reponame, branchname
tests, environments, repos = self.resolver.testEnvironmentAndRepoDefinitionsFor(reponame, branchname)
print branchname
print "\tbuilds: "
for test, testDef in sorted(tests.iteritems()):
if testDef.matches.Build:
print "\t\t", test
print "\ttests: "
for test, testDef in sorted(tests.iteritems()):
if testDef.matches.Test:
print "\t\t", test
print "\trepos: "
for repo, repoDef in sorted(repos.iteritems()):
if repoDef.matches.Pin:
print "\t\t", repo, "->", "/".join(repoDef.reference.split("/")[:-1] + [repoDef.branch]), "=", repoDef.commitHash()
print "\trepo imports: "
for repo, repoDef in sorted(repos.iteritems()):
if repoDef.matches.ImportedReference:
print "\t\t", repo, "from", repoDef.import_source, "=", repoDef.orig_reference, "=", repoDef.commitHash()
print "\tenvironments: "
for envName, envDef in sorted(environments.iteritems()):
print "\t\t", envName
def bestRepo(self, reponame):
if reponame in self.allRepoNames:
return reponame
for path in sorted(self.repo_prefixes_to_strip, key=len):
if path + reponame in self.allRepoNames:
return path + reponame
for path in sorted(self.repo_prefixes_to_strip, key=len):
if self.getGitRepo(path + reponame):
return path + reponame
return reponame
def walkCheckedOutRepos(self, f):
if self.checkout_root is None:
return None
paths_visited = set()
results = []
def visitRepoRef(ref):
path = self.checkout_root_path(ref.reponame())
if path in paths_visited:
return
else:
paths_visited.add(path)
results.append((path, "/".join(ref.reponame().split(":")[:-1]), ref.reponame().split(":")[-1]))
#walk all the repo definitions and make sure everything is up-to-date
resolver = TestDefinitionResolverOverride(self, visitRepoRef)
resolver.testEnvironmentAndRepoDefinitionsFor(ROOT_CHECKOUT_NAME, "HEAD")
results.append(
(self.checkout_root_path(ROOT_CHECKOUT_NAME), "src", self.checkout_root[0])
)
for path, localname, actualrepo in sorted(results, key=lambda vals: vals[1]):
f(path, localname, actualrepo)
def status(self, args):
if self.checkout_root is None:
print "Nothing checked out..."
return
def printer(path, localname, remotename):
git = Git.Git(path)
if args.all or self.repoShouldBeDisplayed(remotename):
hash = git.currentCheckedOutCommit()
diffstat = git.currentFileNumStat() if git.isInitialized() else None
print "%-50s" % localname, "%-50s" % self.repoShortname(remotename), hash #, git.branchnameForCommitSloppy(hash)
if git.isInitialized():
diffstat = git.currentFileNumStat()
for path in diffstat:
print "\t++ %-5d -- %-5d %s" % (diffstat[path][0], diffstat[path][1], path)
else:
print "\tNOT INITIALIZED"
self.walkCheckedOutRepos(printer)
def main(argv):
try:
Git.Git.versionCheck()
except UserWarning as e:
print "Error:\n\n%s" % str(e)
#print traceback.format_exc()
return 1
parsedArgs = createArgumentParser().parse_args()
configureLogging(verbose=parsedArgs.verbose)
try:
if parsedArgs.command == "init":
run_init(parsedArgs)
else:
root = find_cur_root(os.getcwd())
if not root:
raise UserWarning("Not a tl path")
ctl = TestLooperCtl(root)
try:
if parsedArgs.command == "checkout":
ctl.checkout(parsedArgs)
elif parsedArgs.command == "info":
ctl.info(parsedArgs)
elif parsedArgs.command == "run":
ctl.run(parsedArgs)
elif parsedArgs.command == "fetch":
ctl.fetch(parsedArgs)
elif parsedArgs.command == "status":
ctl.status(parsedArgs)
else:
raise UserWarning("Unknown command " + parsedArgs.command)
finally:
ctl.writeState()
except UserWarning as e:
print "Error:\n\n%s" % str(e)
#print traceback.format_exc()
return 1
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
host.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the host OS and hypervisor.
This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes
"""
import operator
import os
import socket
import sys
import threading
import eventlet
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
import six
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import rpc
from nova import utils
from nova.virt import event as virtevent
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
libvirt = None
LOG = logging.getLogger(__name__)
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("queue" if six.PY3 else "Queue")
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
# This list is for libvirt hypervisor drivers that need special handling.
# This is *not* the complete list of supported hypervisor drivers.
HV_DRIVER_QEMU = "QEMU"
HV_DRIVER_XEN = "Xen"
class DomainJobInfo(object):
"""Information about libvirt background jobs
This class encapsulates information about libvirt
background jobs. It provides a mapping from either
the old virDomainGetJobInfo API which returned a
fixed list of fields, or the modern virDomainGetJobStats
which returns an extendable dict of fields.
"""
_have_job_stats = True
def __init__(self, **kwargs):
self.type = kwargs.get("type", libvirt.VIR_DOMAIN_JOB_NONE)
self.time_elapsed = kwargs.get("time_elapsed", 0)
self.time_remaining = kwargs.get("time_remaining", 0)
self.downtime = kwargs.get("downtime", 0)
self.setup_time = kwargs.get("setup_time", 0)
self.data_total = kwargs.get("data_total", 0)
self.data_processed = kwargs.get("data_processed", 0)
self.data_remaining = kwargs.get("data_remaining", 0)
self.memory_total = kwargs.get("memory_total", 0)
self.memory_processed = kwargs.get("memory_processed", 0)
self.memory_remaining = kwargs.get("memory_remaining", 0)
self.memory_constant = kwargs.get("memory_constant", 0)
self.memory_normal = kwargs.get("memory_normal", 0)
self.memory_normal_bytes = kwargs.get("memory_normal_bytes", 0)
self.memory_bps = kwargs.get("memory_bps", 0)
self.disk_total = kwargs.get("disk_total", 0)
self.disk_processed = kwargs.get("disk_processed", 0)
self.disk_remaining = kwargs.get("disk_remaining", 0)
self.disk_bps = kwargs.get("disk_bps", 0)
self.comp_cache = kwargs.get("compression_cache", 0)
self.comp_bytes = kwargs.get("compression_bytes", 0)
self.comp_pages = kwargs.get("compression_pages", 0)
self.comp_cache_misses = kwargs.get("compression_cache_misses", 0)
self.comp_overflow = kwargs.get("compression_overflow", 0)
@classmethod
def _get_job_stats_compat(cls, dom):
# Make the old virDomainGetJobInfo method look similar to the
# modern virDomainGetJobStats method
try:
info = dom.jobInfo()
except libvirt.libvirtError as ex:
# When migration of a transient guest completes, the guest
# goes away so we'll see NO_DOMAIN error code
#
# When migration of a persistent guest completes, the guest
# merely shuts off, but libvirt unhelpfully raises an
# OPERATION_INVALID error code
#
            # Let's pretend both of these mean success
if ex.get_error_code() in (libvirt.VIR_ERR_NO_DOMAIN,
libvirt.VIR_ERR_OPERATION_INVALID):
LOG.debug("Domain has shutdown/gone away: %s", ex)
return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
else:
LOG.debug("Failed to get job info: %s", ex)
raise
return cls(
type=info[0],
time_elapsed=info[1],
time_remaining=info[2],
data_total=info[3],
data_processed=info[4],
data_remaining=info[5],
memory_total=info[6],
memory_processed=info[7],
memory_remaining=info[8],
disk_total=info[9],
disk_processed=info[10],
disk_remaining=info[11])
@classmethod
def for_domain(cls, dom):
'''Get job info for the domain
Query the libvirt job info for the domain (ie progress
of migration, or snapshot operation)
Returns: a DomainJobInfo instance
'''
if cls._have_job_stats:
try:
stats = dom.jobStats()
return cls(**stats)
except libvirt.libvirtError as ex:
if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
# Remote libvirt doesn't support new API
LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
cls._have_job_stats = False
return cls._get_job_stats_compat(dom)
elif ex.get_error_code() in (
libvirt.VIR_ERR_NO_DOMAIN,
libvirt.VIR_ERR_OPERATION_INVALID):
# Transient guest finished migration, so it has gone
# away completely
LOG.debug("Domain has shutdown/gone away: %s", ex)
return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
else:
LOG.debug("Failed to get job stats: %s", ex)
raise
except AttributeError as ex:
# Local python binding doesn't support new API
LOG.debug("Missing local virDomainGetJobStats: %s", ex)
cls._have_job_stats = False
return cls._get_job_stats_compat(dom)
else:
return cls._get_job_stats_compat(dom)
class Host(object):
def __init__(self, uri, read_only=False,
conn_event_handler=None,
lifecycle_event_handler=None):
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._uri = uri
self._read_only = read_only
self._conn_event_handler = conn_event_handler
self._lifecycle_event_handler = lifecycle_event_handler
self._skip_list_all_domains = False
self._caps = None
self._hostname = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._event_queue = None
self._events_delayed = {}
# Note(toabctl): During a reboot of a domain, STOPPED and
# STARTED events are sent. To prevent shutting
# down the domain during a reboot, delay the
# STOPPED lifecycle event some seconds.
self._lifecycle_delay = 15
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self._queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
@staticmethod
def _connect(uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
Host._connect_auth_cb,
None]
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
# tpool.proxy_call creates a native thread. Due to limitations
# with eventlet locking we cannot use the logging API inside
# the called function.
return tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
libvirt.openAuth, uri, auth, flags)
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
last_close_event = None
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
# call possibly with delay
self._event_emit_delayed(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = str(last_close_event['reason'])
msg = _("Connection to libvirt lost: %s") % reason
self._wrapped_conn = None
if self._conn_event_handler is not None:
self._conn_event_handler(False, msg)
def _event_emit_delayed(self, event):
"""Emit events - possibly delayed."""
def event_cleanup(gt, *args, **kwargs):
"""Callback function for greenthread. Called
to cleanup the _events_delayed dictionary when a event
was called.
"""
event = args[0]
self._events_delayed.pop(event.uuid, None)
# Cleanup possible delayed stop events.
if event.uuid in self._events_delayed.keys():
self._events_delayed[event.uuid].cancel()
self._events_delayed.pop(event.uuid, None)
LOG.debug("Removed pending event for %s due to "
"lifecycle event", event.uuid)
if event.transition == virtevent.EVENT_LIFECYCLE_STOPPED:
# Delay STOPPED event, as they may be followed by a STARTED
# event in case the instance is rebooting
id_ = greenthread.spawn_after(self._lifecycle_delay,
self._event_emit, event)
self._events_delayed[event.uuid] = id_
# add callback to cleanup self._events_delayed dict after
# event was called
id_.link(event_cleanup, event)
else:
self._event_emit(event)
def _event_emit(self, event):
if self._lifecycle_event_handler is not None:
self._lifecycle_event_handler(event)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
self._event_thread.setDaemon(True)
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
eventlet.spawn(self._dispatch_thread)
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug('Connecting to libvirt: %s', self._uri)
wrapped_conn = None
try:
wrapped_conn = self._connect(self._uri, self._read_only)
finally:
# Enabling the compute service, in case it was disabled
# since the connection was successful.
disable_reason = None
if not wrapped_conn:
disable_reason = 'Failed to connect to libvirt'
if self._conn_event_handler is not None:
self._conn_event_handler(bool(wrapped_conn), disable_reason)
self._wrapped_conn = wrapped_conn
try:
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warn(_LW("URI %(uri)s does not support events: %(error)s"),
{'uri': self._uri, 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except (TypeError, AttributeError) as e:
# NOTE: The registerCloseCallback of python-libvirt 1.0.1+
# is defined with 3 arguments, and the above registerClose-
# Callback succeeds. However, the one of python-libvirt 1.0.0
# is defined with 4 arguments and TypeError happens here.
# Then python-libvirt 0.9 does not define a method register-
# CloseCallback.
LOG.debug("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s", e)
except libvirt.libvirtError as e:
LOG.warn(_LW("URI %(uri)s does not support connection"
" events: %(error)s"),
{'uri': self._uri, 'error': e})
return wrapped_conn
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
wrapped_conn = self._wrapped_conn
if not wrapped_conn or not self._test_connection(wrapped_conn):
wrapped_conn = self._get_new_connection()
return wrapped_conn
def get_connection(self):
"""Returns a connection to the hypervisor
This method should be used to create and return a well
configured connection to the hypervisor.
:returns: a libvirt.virConnect object
"""
try:
conn = self._get_connection()
except libvirt.libvirtError as ex:
LOG.exception(_LE("Connection to libvirt failed: %s"), ex)
payload = dict(ip=CONF.my_ip,
method='_connect',
reason=ex)
rpc.get_notifier('compute').error(nova_context.get_admin_context(),
'compute.libvirt.error',
payload)
raise exception.HypervisorUnavailable(host=CONF.host)
return conn
@staticmethod
def _libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
def initialize(self):
# NOTE(dkliban): Error handler needs to be registered before libvirt
# connection is used for the first time. Otherwise, the
# handler does not get registered.
libvirt.registerErrorHandler(self._libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._init_events()
self._initialized = True
def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
op=operator.lt):
"""Check libvirt version, hypervisor version, and hypervisor type
:param hv_type: hypervisor driver from the top of this file.
"""
conn = self.get_connection()
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if op(libvirt_version, utils.convert_version_to_int(lv_ver)):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if op(hypervisor_version,
utils.convert_version_to_int(hv_ver)):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.lt)
def has_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.ne)
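    # Rough usage sketch (return values assume a host running libvirt >= 1.2.0):
    #   host.has_min_version(lv_ver=(1, 0, 0))    -> True
    #   host.has_min_version(lv_ver=(99, 0, 0))   -> False
    #   host.has_version(hv_type=HV_DRIVER_QEMU)  -> True only on a QEMU/KVM host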
# TODO(sahid): needs to be private
def get_domain(self, instance):
"""Retrieve libvirt domain object for an instance.
:param instance: an nova.objects.Instance object
Attempt to lookup the libvirt domain objects
corresponding to the Nova instance, based on
its name. If not found it will raise an
exception.InstanceNotFound exception. On other
errors, it will raise a exception.NovaException
exception.
:returns: a libvirt.Domain object
"""
return self._get_domain_by_name(instance.name)
def get_guest(self, instance):
"""Retrieve libvirt domain object for an instance.
:param instance: an nova.objects.Instance object
:returns: a nova.virt.libvirt.Guest object
"""
return libvirt_guest.Guest(
self.get_domain(instance))
def _get_domain_by_id(self, instance_id):
"""Retrieve libvirt domain object given an instance id.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
conn = self.get_connection()
return conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_("Error from libvirt while looking up %(instance_id)s: "
"[Error Code %(error_code)s] %(ex)s")
% {'instance_id': instance_id,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _get_domain_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
conn = self.get_connection()
return conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _list_instance_domains_fast(self, only_running=True):
# The modern (>= 0.9.13) fast way - 1 single API call for all domains
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
return self.get_connection().listAllDomains(flags)
def _list_instance_domains_slow(self, only_running=True):
# The legacy (< 0.9.13) slow way - O(n) API call for n domains
uuids = []
doms = []
# Redundant numOfDomains check is for libvirt bz #836647
if self.get_connection().numOfDomains() > 0:
for id in self.get_connection().listDomainsID():
try:
dom = self._get_domain_by_id(id)
doms.append(dom)
uuids.append(dom.UUIDString())
except exception.InstanceNotFound:
continue
if only_running:
return doms
for name in self.get_connection().listDefinedDomains():
try:
dom = self._get_domain_by_name(name)
if dom.UUIDString() not in uuids:
doms.append(dom)
except exception.InstanceNotFound:
continue
return doms
def list_instance_domains(self, only_running=True, only_guests=True):
"""Get a list of libvirt.Domain objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
        Query libvirt to get a list of all libvirt.Domain objects
that correspond to nova instances. If the only_running parameter
is true this list will only include active domains, otherwise
inactive domains will be included too. If the only_guests parameter
is true the list will have any "host" domain (aka Xen Domain-0)
filtered out.
:returns: list of libvirt.Domain objects
"""
if not self._skip_list_all_domains:
try:
alldoms = self._list_instance_domains_fast(only_running)
except (libvirt.libvirtError, AttributeError) as ex:
LOG.info(_LI("Unable to use bulk domain list APIs, "
"falling back to slow code path: %(ex)s"),
{'ex': ex})
self._skip_list_all_domains = True
if self._skip_list_all_domains:
# Old libvirt, or a libvirt driver which doesn't
# implement the new API
alldoms = self._list_instance_domains_slow(only_running)
doms = []
for dom in alldoms:
if only_guests and dom.ID() == 0:
continue
doms.append(dom)
return doms
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
Method is only used by NUMA code paths which check on
libvirt version >= 1.0.4. getCPUMap() was introduced in
libvirt 1.0.0.
:returns: set of online CPUs, raises libvirtError on error
"""
(cpus, cpu_map, online) = self.get_connection().getCPUMap()
online_cpus = set()
for cpu in range(cpus):
if cpu_map[cpu]:
online_cpus.add(cpu)
return online_cpus
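    # Sketch of the getCPUMap() tuple the loop above consumes (values are
    # illustrative): cpus=4, cpu_map=(True, True, False, True), online=3,
    # which would make get_online_cpus() return {0, 1, 3}.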
def get_capabilities(self):
"""Returns the host capabilities information
Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
Note: The result is cached in the member attribute _caps.
:returns: a config.LibvirtConfigCaps object
"""
if not self._caps:
xmlstr = self.get_connection().getCapabilities()
LOG.info(_LI("Libvirt host capabilities %s"), xmlstr)
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
# NOTE(mriedem): Don't attempt to get baseline CPU features
# if libvirt can't determine the host cpu model.
if (hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES')
and self._caps.host.cpu.model is not None):
try:
features = self.get_connection().baselineCPU(
[self._caps.host.cpu.to_xml()],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
# FIXME(wangpan): the return value of baselineCPU should be
# None or an XML string, but a libvirt bug introduced in
# 1.1.2 (and fixed in 1.2.0) can make it return -1, so
# this -1 check should be removed later.
if features and features != -1:
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warn(_LW("URI %(uri)s does not support full set"
" of host capabilities: %(error)s"),
{'uri': self._uri, 'error': ex})
else:
raise
return self._caps
def get_driver_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self.get_connection().getType()
def get_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
return self.get_connection().getVersion()
def get_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self.get_connection().getHostname()
if self._hostname is None:
self._hostname = hostname
elif hostname != self._hostname:
LOG.error(_LE('Hostname has changed from %(old)s '
'to %(new)s. A restart is required for this to take effect.'),
{'old': self._hostname,
'new': hostname})
return self._hostname
def find_secret(self, usage_type, usage_id):
"""Find a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
if usage_type == 'iscsi':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_ISCSI
elif usage_type in ('rbd', 'ceph'):
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_CEPH
elif usage_type == 'volume':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_VOLUME
else:
msg = _("Invalid usage_type: %s")
raise exception.NovaException(msg % usage_type)
try:
conn = self.get_connection()
return conn.secretLookupByUsage(usage_type_const, usage_id)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_SECRET:
return None
def create_secret(self, usage_type, usage_id, password=None):
"""Create a secret.
:param usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
'rbd' will be converted to 'ceph'.
:param usage_id: name of resource in secret
:param password: optional secret value to set
"""
secret_conf = vconfig.LibvirtConfigSecret()
secret_conf.ephemeral = False
secret_conf.private = False
secret_conf.usage_id = usage_id
if usage_type in ('rbd', 'ceph'):
secret_conf.usage_type = 'ceph'
elif usage_type == 'iscsi':
secret_conf.usage_type = 'iscsi'
elif usage_type == 'volume':
secret_conf.usage_type = 'volume'
else:
msg = _("Invalid usage_type: %s")
raise exception.NovaException(msg % usage_type)
xml = secret_conf.to_xml()
try:
LOG.debug('Secret XML: %s' % xml)
conn = self.get_connection()
secret = conn.secretDefineXML(xml)
if password is not None:
secret.setValue(password)
return secret
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error defining a secret with XML: %s') % xml)
def delete_secret(self, usage_type, usage_id):
"""Delete a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
secret = self.find_secret(usage_type, usage_id)
if secret is not None:
secret.undefine()
def _get_hardware_info(self):
"""Returns hardware information about the Node.
Note that the memory size is reported in MiB instead of KiB.
"""
return self.get_connection().getInfo()
def get_cpu_count(self):
"""Returns the total numbers of cpu in the host."""
return self._get_hardware_info()[2]
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._get_hardware_info()[1]
def get_memory_mb_used(self):
"""Get the used memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
# Used memory is read from /proc/meminfo, which is Linux-only.
if not sys.platform.lower().startswith('linux'):
return 0
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
if CONF.libvirt.virt_type == 'xen':
used = 0
for dom in self.list_instance_domains(only_guests=False):
try:
# TODO(sahid): we should have method list_guests()
# which returns Guest's objects
guest = libvirt_guest.Guest(dom)
# TODO(sahid): Use get_info...
dom_mem = int(guest._get_domain_info(self)[2])
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": dom.UUIDString(), "ex": e})
continue
# skip dom0
if dom.ID() != 0:
used += dom_mem
else:
# the memory reported by dom0 is greater than what
# it is actually using
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used / units.Ki
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self.get_memory_mb_total() - avail / units.Ki
def get_cpu_stats(self):
"""Returns the current CPU state of the host with frequency."""
stats = self.get_connection().getCPUStats(
libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
# getInfo() returns various information about the host node;
# index 3 of the returned tuple is the expected CPU frequency.
stats["frequency"] = self._get_hardware_info()[3]
return stats
def write_instance_config(self, xml):
"""Defines a domain, but does not start it.
:param xml: XML domain definition of the guest.
:returns: a virDomain instance
"""
return self.get_connection().defineXML(xml)
def device_lookup_by_name(self, name):
"""Lookup a node device by its name.
:returns: a virNodeDevice instance
"""
return self.get_connection().nodeDeviceLookupByName(name)
def list_pci_devices(self, flags=0):
"""Lookup pci devices.
:returns: a list of virNodeDevice instance
"""
return self.get_connection().listDevices("pci", flags)
def compare_cpu(self, xmlDesc, flags=0):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
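# --- Illustrative sketch (not part of the original module) ---
# A minimal, standalone approximation of the non-Xen branch of
# get_memory_mb_used() above: "used" memory is derived from /proc/meminfo
# as total - (MemFree + Buffers + Cached). /proc/meminfo reports KiB, so
# the result is converted to MiB. This is only a sketch; it assumes a
# Linux host, and the helper name below is hypothetical.
def sketch_memory_mb_used(total_mb):
    """Return an estimate of used memory in MiB, or 0 if unsupported."""
    import os
    if not os.path.exists('/proc/meminfo'):
        return 0
    with open('/proc/meminfo') as fp:
        fields = fp.read().split()
    # Each key (e.g. 'MemFree:') is followed by its value in KiB.
    avail_kib = sum(int(fields[fields.index(key) + 1])
                    for key in ('MemFree:', 'Buffers:', 'Cached:'))
    return total_mb - avail_kib // 1024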
|
GUI.py
|
from tkinter import Tk, Frame, Scrollbar, Label, END, Entry, Text, VERTICAL, Button
import socket
import threading
from tkinter import messagebox
class GUI:
client_socket = None
last_received_message = None
def __init__(self, master):
self.root = master
self.chat_transcript_area = None
self.name_widget = None
self.enter_text_widget = None
self.join_button = None
self.initialize_socket()
self.initialize_gui()
self.listen_for_incoming_messages_in_a_thread()
def initialize_socket(self):
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote_ip = '127.0.0.1'
remote_port = 10319
self.client_socket.connect((remote_ip, remote_port))
def initialize_gui(self):
self.root.title("Socket Chat")
self.root.resizable(0, 0)
self.display_chat_box()
self.display_name_section()
self.display_chat_entry_box()
def listen_for_incoming_messages_in_a_thread(self):
thread = threading.Thread(target=self.receive_message_from_server, args=(self.client_socket,))
thread.start()
def receive_message_from_server(self, so):
while True:
buffer = so.recv(256)
if not buffer:
break
message = buffer.decode('utf-8')
if "joined" in message:
user = message.split(":")[1]
message = user + " has joined"
self.chat_transcript_area.insert('end', message + '\n')
self.chat_transcript_area.yview(END)
else:
self.chat_transcript_area.insert('end', message + '\n')
self.chat_transcript_area.yview(END)
so.close()
def display_name_section(self):
frame = Frame()
Label(frame, text='Enter your name:', font=("Helvetica", 16)).pack(side='left', padx=10)
self.name_widget = Entry(frame, width=50, borderwidth=2)
self.name_widget.pack(side='left', anchor='e')
# pack() returns None, so keep the widget reference and pack separately.
self.join_button = Button(frame, text="Join", width=10, command=self.on_join)
self.join_button.pack(side='left')
frame.pack(side='top', anchor='nw')
def display_chat_box(self):
frame = Frame()
Label(frame, text='Chat Box:', font=("Serif", 12)).pack(side='top', anchor='w')
self.chat_transcript_area = Text(frame, width=60, height=10, font=("Serif", 12))
scrollbar = Scrollbar(frame, command=self.chat_transcript_area.yview, orient=VERTICAL)
self.chat_transcript_area.config(yscrollcommand=scrollbar.set)
self.chat_transcript_area.bind('<KeyPress>', lambda e: 'break')
self.chat_transcript_area.pack(side='left', padx=10)
scrollbar.pack(side='right', fill='y')
frame.pack(side='top')
def display_chat_entry_box(self):
frame = Frame()
Label(frame, text='Enter message:', font=("Serif", 12)).pack(side='top', anchor='w')
self.enter_text_widget = Text(frame, width=60, height=3, font=("Serif", 12))
self.enter_text_widget.pack(side='left', pady=15)
self.enter_text_widget.bind('<Return>', self.on_enter_key_pressed)
frame.pack(side='top')
def on_join(self):
if len(self.name_widget.get()) == 0:
messagebox.showerror(
"Enter your name", "Enter your name to send a message")
return
self.name_widget.config(state='disabled')
self.client_socket.send(("joined:" + self.name_widget.get()).encode('utf-8'))
def on_enter_key_pressed(self, event):
if len(self.name_widget.get()) == 0:
messagebox.showerror(
"Enter your name", "Enter your name to send a message")
return
self.send_chat()
self.clear_text()
def clear_text(self):
self.enter_text_widget.delete(1.0, 'end')
def send_chat(self):
senders_name = self.name_widget.get().strip() + ": "
data = self.enter_text_widget.get(1.0, 'end').strip()
message = (senders_name + data).encode('utf-8')
self.chat_transcript_area.insert('end', message.decode('utf-8') + '\n')
self.chat_transcript_area.yview(END)
self.client_socket.send(message)
self.enter_text_widget.delete(1.0, 'end')
return 'break'
def on_close_window(self):
if messagebox.askokcancel("Quit", "Do you want to quit?"):
self.root.destroy()
self.client_socket.close()
exit(0)
if __name__ == '__main__':
root = Tk()
gui = GUI(root)
root.protocol("WM_DELETE_WINDOW", gui.on_close_window)
root.mainloop()
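# --- Illustrative sketch (not part of the original file) ---
# The wire protocol above is plain UTF-8 text: a join announcement arrives
# as "joined:<name>" and ordinary chat lines arrive as "<name>: <message>".
# This mirrors the transformation done in receive_message_from_server();
# the helper name is hypothetical.
def _sketch_render(message):
    if "joined" in message:
        return message.split(":")[1] + " has joined"
    return message


assert _sketch_render("joined:alice") == "alice has joined"
assert _sketch_render("bob: hi there") == "bob: hi there"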
|
gunicorn_worker.py
|
import logging
import logging.config
from multiprocessing import Process
from typing import Union, TYPE_CHECKING
from util.log import logfile_path
if TYPE_CHECKING:
from features import FeatureNameValue
from workers.worker import Worker
class GunicornWorker:
"""
GunicornWorker allows a Quay worker to run as a Gunicorn worker.
The Quay worker is launched as a sub-process.
name: the Quay worker this class delegates for.
worker: a Quay worker type which implements a .start method.
feature_flag: a boolean value determining whether the worker process should be launched.
"""
def __init__(
self, name: str, worker: "Worker", feature_flag: Union[bool, "FeatureNameValue"]
) -> None:
logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)
self.name = name
self.worker = worker
self.feature_flag = feature_flag
self.logger = logging.getLogger(name)
if self.feature_flag:
self.logger.debug("starting {} thread".format(self.name))
p = Process(target=self.worker.start)
p.start()
def __call__(self, environ, start_response):
raise NotImplementedError()
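# --- Illustrative sketch (not part of the original module) ---
# A dependency-free sketch of the same delegation pattern GunicornWorker
# uses above: an object exposing .start() is launched in a child process
# only when its feature flag is enabled. "EchoWorker" and
# "launch_if_enabled" are hypothetical stand-ins, not Quay APIs.
import time
from multiprocessing import Process


class EchoWorker:
    def start(self):
        # A real worker would run a long-lived loop here.
        print("worker running in child process")
        time.sleep(0.1)


def launch_if_enabled(worker, feature_flag):
    if not feature_flag:
        return None
    p = Process(target=worker.start)
    p.start()
    return p


if __name__ == '__main__':
    proc = launch_if_enabled(EchoWorker(), feature_flag=True)
    if proc is not None:
        proc.join()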
|
db_tests.py
|
from itertools import permutations
try:
from Queue import Queue
except ImportError:
from queue import Queue
import re
import threading
from peewee import *
from peewee import Database
from peewee import FIELD
from peewee import attrdict
from peewee import sort_models
from .base import BaseTestCase
from .base import DatabaseTestCase
from .base import IS_CRDB
from .base import IS_MYSQL
from .base import IS_POSTGRESQL
from .base import IS_SQLITE
from .base import ModelTestCase
from .base import TestModel
from .base import db
from .base import db_loader
from .base import get_in_memory_db
from .base import requires_models
from .base import requires_postgresql
from .base_models import Category
from .base_models import Tweet
from .base_models import User
class TestDatabase(DatabaseTestCase):
database = db_loader('sqlite3')
def test_pragmas(self):
self.database.cache_size = -2048
self.assertEqual(self.database.cache_size, -2048)
self.database.cache_size = -4096
self.assertEqual(self.database.cache_size, -4096)
self.database.foreign_keys = 'on'
self.assertEqual(self.database.foreign_keys, 1)
self.database.foreign_keys = 'off'
self.assertEqual(self.database.foreign_keys, 0)
def test_timeout_semantics(self):
self.assertEqual(self.database.timeout, 5)
self.assertEqual(self.database.pragma('busy_timeout'), 5000)
self.database.timeout = 2.5
self.assertEqual(self.database.timeout, 2.5)
self.assertEqual(self.database.pragma('busy_timeout'), 2500)
self.database.close()
self.database.connect()
self.assertEqual(self.database.timeout, 2.5)
self.assertEqual(self.database.pragma('busy_timeout'), 2500)
def test_pragmas_deferred(self):
pragmas = (('journal_mode', 'wal'),)
db = SqliteDatabase(None, pragmas=pragmas)
self.assertEqual(db._pragmas, pragmas)
# Test pragmas preserved after initializing.
db.init(':memory:')
self.assertEqual(db._pragmas, pragmas)
db = SqliteDatabase(None)
self.assertEqual(db._pragmas, ())
# Test pragmas are set and subsequently overwritten.
db.init(':memory:', pragmas=pragmas)
self.assertEqual(db._pragmas, pragmas)
db.init(':memory:', pragmas=())
self.assertEqual(db._pragmas, ())
# Test when specified twice, the previous value is overwritten.
db = SqliteDatabase(None, pragmas=pragmas)
db.init(':memory:', pragmas=(('cache_size', -8000),))
self.assertEqual(db._pragmas, (('cache_size', -8000),))
def test_pragmas_as_dict(self):
pragmas = {'journal_mode': 'wal'}
pragma_list = [('journal_mode', 'wal')]
db = SqliteDatabase(':memory:', pragmas=pragmas)
self.assertEqual(db._pragmas, pragma_list)
# Test deferred databases correctly handle pragma dicts.
db = SqliteDatabase(None, pragmas=pragmas)
self.assertEqual(db._pragmas, pragma_list)
db.init(':memory:')
self.assertEqual(db._pragmas, pragma_list)
db.init(':memory:', pragmas={})
self.assertEqual(db._pragmas, [])
def test_pragmas_permanent(self):
db = SqliteDatabase(':memory:')
db.execute_sql('pragma foreign_keys=0')
self.assertEqual(db.foreign_keys, 0)
db.pragma('foreign_keys', 1, True)
self.assertEqual(db.foreign_keys, 1)
db.close()
db.connect()
self.assertEqual(db.foreign_keys, 1)
def test_context_settings(self):
class TestDatabase(Database):
field_types = {'BIGINT': 'TEST_BIGINT', 'TEXT': 'TEST_TEXT'}
operations = {'LIKE': '~', 'NEW': '->>'}
param = '$'
test_db = TestDatabase(None)
state = test_db.get_sql_context().state
self.assertEqual(state.field_types['BIGINT'], 'TEST_BIGINT')
self.assertEqual(state.field_types['TEXT'], 'TEST_TEXT')
self.assertEqual(state.field_types['INT'], FIELD.INT)
self.assertEqual(state.field_types['VARCHAR'], FIELD.VARCHAR)
self.assertEqual(state.operations['LIKE'], '~')
self.assertEqual(state.operations['NEW'], '->>')
self.assertEqual(state.operations['ILIKE'], 'ILIKE')
self.assertEqual(state.param, '$')
self.assertEqual(state.quote, '""')
test_db2 = TestDatabase(None, field_types={'BIGINT': 'XXX_BIGINT',
'INT': 'XXX_INT'})
state = test_db2.get_sql_context().state
self.assertEqual(state.field_types['BIGINT'], 'XXX_BIGINT')
self.assertEqual(state.field_types['TEXT'], 'TEST_TEXT')
self.assertEqual(state.field_types['INT'], 'XXX_INT')
self.assertEqual(state.field_types['VARCHAR'], FIELD.VARCHAR)
def test_connection_state(self):
conn = self.database.connection()
self.assertFalse(self.database.is_closed())
self.database.close()
self.assertTrue(self.database.is_closed())
conn = self.database.connection()
self.assertFalse(self.database.is_closed())
def test_db_context_manager(self):
self.database.close()
self.assertTrue(self.database.is_closed())
with self.database:
self.assertFalse(self.database.is_closed())
self.assertTrue(self.database.is_closed())
self.database.connect()
self.assertFalse(self.database.is_closed())
# Enter context with an already-open db.
with self.database:
self.assertFalse(self.database.is_closed())
# Closed after exit.
self.assertTrue(self.database.is_closed())
def test_connection_initialization(self):
state = {'count': 0}
class TestDatabase(SqliteDatabase):
def _initialize_connection(self, conn):
state['count'] += 1
db = TestDatabase(':memory:')
self.assertEqual(state['count'], 0)
conn = db.connection()
self.assertEqual(state['count'], 1)
# Since already connected, nothing happens here.
conn = db.connection()
self.assertEqual(state['count'], 1)
def test_connect_semantics(self):
state = {'count': 0}
class TestDatabase(SqliteDatabase):
def _initialize_connection(self, conn):
state['count'] += 1
db = TestDatabase(':memory:')
db.connect()
self.assertEqual(state['count'], 1)
self.assertRaises(OperationalError, db.connect)
self.assertEqual(state['count'], 1)
self.assertFalse(db.connect(reuse_if_open=True))
self.assertEqual(state['count'], 1)
with db:
self.assertEqual(state['count'], 1)
self.assertFalse(db.is_closed())
self.assertTrue(db.is_closed())
with db:
self.assertEqual(state['count'], 2)
def test_execute_sql(self):
self.database.execute_sql('CREATE TABLE register (val INTEGER);')
self.database.execute_sql('INSERT INTO register (val) VALUES (?), (?)',
(1337, 31337))
cursor = self.database.execute_sql(
'SELECT val FROM register ORDER BY val')
self.assertEqual(cursor.fetchall(), [(1337,), (31337,)])
self.database.execute_sql('DROP TABLE register;')
def test_bind_helpers(self):
db = get_in_memory_db()
alt_db = get_in_memory_db()
class Base(Model):
class Meta:
database = db
class A(Base):
a = TextField()
class B(Base):
b = TextField()
db.create_tables([A, B])
# Temporarily bind A to alt_db.
with alt_db.bind_ctx([A]):
self.assertFalse(A.table_exists())
self.assertTrue(B.table_exists())
self.assertTrue(A.table_exists())
self.assertTrue(B.table_exists())
alt_db.bind([A])
self.assertFalse(A.table_exists())
self.assertTrue(B.table_exists())
db.close()
alt_db.close()
def test_bind_regression(self):
class Base(Model):
class Meta:
database = None
class A(Base): pass
class B(Base): pass
class AB(Base):
a = ForeignKeyField(A)
b = ForeignKeyField(B)
self.assertTrue(A._meta.database is None)
db = get_in_memory_db()
with db.bind_ctx([A, B]):
self.assertEqual(A._meta.database, db)
self.assertEqual(B._meta.database, db)
self.assertEqual(AB._meta.database, db)
self.assertTrue(A._meta.database is None)
self.assertTrue(B._meta.database is None)
self.assertTrue(AB._meta.database is None)
class C(Base):
a = ForeignKeyField(A)
with db.bind_ctx([C], bind_refs=False):
self.assertEqual(C._meta.database, db)
self.assertTrue(A._meta.database is None)
self.assertTrue(C._meta.database is None)
self.assertTrue(A._meta.database is None)
def test_batch_commit(self):
class PatchCommitDatabase(SqliteDatabase):
commits = 0
def begin(self): pass
def commit(self):
self.commits += 1
db = PatchCommitDatabase(':memory:')
def assertBatches(n_objs, batch_size, n_commits):
accum = []
source = range(n_objs)
db.commits = 0
for item in db.batch_commit(source, batch_size):
accum.append(item)
self.assertEqual(accum, list(range(n_objs)))
self.assertEqual(db.commits, n_commits)
assertBatches(12, 1, 12)
assertBatches(12, 2, 6)
assertBatches(12, 3, 4)
assertBatches(12, 4, 3)
assertBatches(12, 5, 3)
assertBatches(12, 6, 2)
assertBatches(12, 7, 2)
assertBatches(12, 11, 2)
assertBatches(12, 12, 1)
assertBatches(12, 13, 1)
def test_server_version(self):
class FakeDatabase(Database):
server_version = None
def _connect(self):
return 1
def _close(self, conn):
pass
def _set_server_version(self, conn):
self.server_version = (1, 33, 7)
db = FakeDatabase(':memory:')
self.assertTrue(db.server_version is None)
db.connect()
self.assertEqual(db.server_version, (1, 33, 7))
db.close()
self.assertEqual(db.server_version, (1, 33, 7))
db.server_version = (1, 2, 3)
db.connect()
self.assertEqual(db.server_version, (1, 2, 3))
db.close()
def test_explicit_connect(self):
db = get_in_memory_db(autoconnect=False)
self.assertRaises(InterfaceError, db.execute_sql, 'pragma cache_size')
with db:
db.execute_sql('pragma cache_size')
self.assertRaises(InterfaceError, db.cursor)
class TestThreadSafety(ModelTestCase):
nthreads = 4
nrows = 10
requires = [User]
def test_multiple_writers(self):
def create_users(idx):
for i in range(idx * self.nrows, (idx + 1) * self.nrows):
User.create(username='u%d' % i)
threads = []
for i in range(self.nthreads):
threads.append(threading.Thread(target=create_users, args=(i,)))
for t in threads: t.start()
for t in threads: t.join()
self.assertEqual(User.select().count(), self.nrows * self.nthreads)
def test_multiple_readers(self):
data = Queue()
def read_user_count(n):
for i in range(n):
data.put(User.select().count())
threads = []
for i in range(self.nthreads):
threads.append(threading.Thread(target=read_user_count,
args=(self.nrows,)))
for t in threads: t.start()
for t in threads: t.join()
self.assertEqual(data.qsize(), self.nrows * self.nthreads)
class TestDeferredDatabase(BaseTestCase):
def test_deferred_database(self):
deferred_db = SqliteDatabase(None)
self.assertTrue(deferred_db.deferred)
class DeferredModel(Model):
class Meta:
database = deferred_db
self.assertRaises(Exception, deferred_db.connect)
query = DeferredModel.select()
self.assertRaises(Exception, query.execute)
deferred_db.init(':memory:')
self.assertFalse(deferred_db.deferred)
conn = deferred_db.connect()
self.assertFalse(deferred_db.is_closed())
DeferredModel._schema.create_all()
self.assertEqual(list(DeferredModel.select()), [])
deferred_db.init(None)
self.assertTrue(deferred_db.deferred)
# The connection was automatically closed.
self.assertTrue(deferred_db.is_closed())
class CatToy(TestModel):
description = TextField()
class Meta:
schema = 'huey'
@requires_postgresql
class TestSchemaNamespace(ModelTestCase):
requires = [CatToy]
def setUp(self):
with self.database:
self.execute('CREATE SCHEMA huey;')
super(TestSchemaNamespace, self).setUp()
def tearDown(self):
super(TestSchemaNamespace, self).tearDown()
with self.database:
self.execute('DROP SCHEMA huey;')
def test_schema(self):
toy = CatToy.create(description='fur mouse')
toy_db = CatToy.select().where(CatToy.id == toy.id).get()
self.assertEqual(toy.id, toy_db.id)
self.assertEqual(toy.description, toy_db.description)
class TestSqliteIsolation(ModelTestCase):
database = db_loader('sqlite3')
requires = [User]
def test_sqlite_isolation(self):
for username in ('u1', 'u2', 'u3'): User.create(username=username)
new_db = db_loader('sqlite3')
curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
self.assertEqual(curs.fetchone()[0], 3)
self.assertEqual(User.select().count(), 3)
self.assertEqual(User.delete().execute(), 3)
with self.database.atomic():
User.create(username='u4')
User.create(username='u5')
# Second conn does not see the changes.
curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
self.assertEqual(curs.fetchone()[0], 0)
# Third conn does not see the changes.
new_db2 = db_loader('sqlite3')
curs = new_db2.execute_sql('SELECT COUNT(*) FROM users')
self.assertEqual(curs.fetchone()[0], 0)
# Original connection sees its own changes.
self.assertEqual(User.select().count(), 2)
curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
self.assertEqual(curs.fetchone()[0], 2)
class UniqueModel(TestModel):
name = CharField(unique=True)
class IndexedModel(TestModel):
first = CharField()
last = CharField()
dob = DateField()
class Meta:
indexes = (
(('first', 'last', 'dob'), True),
(('first', 'last'), False),
)
class Note(TestModel):
content = TextField()
ts = DateTimeField()
status = IntegerField()
class Meta:
table_name = 'notes'
class Person(TestModel):
first = CharField()
last = CharField()
email = CharField()
class Meta:
indexes = (
(('last', 'first'), False),
)
class TestIntrospection(ModelTestCase):
requires = [Category, User, UniqueModel, IndexedModel, Person]
def test_table_exists(self):
self.assertTrue(self.database.table_exists(User._meta.table_name))
self.assertFalse(self.database.table_exists('nuggies'))
def test_get_tables(self):
tables = self.database.get_tables()
required = set(m._meta.table_name for m in self.requires)
self.assertTrue(required.issubset(set(tables)))
UniqueModel._schema.drop_all()
tables = self.database.get_tables()
self.assertFalse(UniqueModel._meta.table_name in tables)
def test_get_indexes(self):
indexes = self.database.get_indexes('unique_model')
data = [(index.name, index.columns, index.unique, index.table)
for index in indexes
if index.name not in ('unique_model_pkey', 'PRIMARY')]
self.assertEqual(data, [
('unique_model_name', ['name'], True, 'unique_model')])
indexes = self.database.get_indexes('indexed_model')
data = [(index.name, index.columns, index.unique, index.table)
for index in indexes
if index.name not in ('indexed_model_pkey', 'PRIMARY')]
self.assertEqual(sorted(data), [
('indexed_model_first_last', ['first', 'last'], False,
'indexed_model'),
('indexed_model_first_last_dob', ['first', 'last', 'dob'], True,
'indexed_model')])
# Multi-column index whose columns are in a different order than
# declared on the table.
indexes = self.database.get_indexes('person')
data = [(index.name, index.columns, index.unique)
for index in indexes
if index.name not in ('person_pkey', 'PRIMARY')]
self.assertEqual(data, [
('person_last_first', ['last', 'first'], False)])
def test_get_columns(self):
columns = self.database.get_columns('indexed_model')
data = [(c.name, c.null, c.primary_key, c.table)
for c in columns]
self.assertEqual(data, [
('id', False, True, 'indexed_model'),
('first', False, False, 'indexed_model'),
('last', False, False, 'indexed_model'),
('dob', False, False, 'indexed_model')])
columns = self.database.get_columns('category')
data = [(c.name, c.null, c.primary_key, c.table)
for c in columns]
self.assertEqual(data, [
('name', False, True, 'category'),
('parent_id', True, False, 'category')])
def test_get_primary_keys(self):
primary_keys = self.database.get_primary_keys('users')
self.assertEqual(primary_keys, ['id'])
primary_keys = self.database.get_primary_keys('category')
self.assertEqual(primary_keys, ['name'])
@requires_models(Note)
def test_get_views(self):
def normalize_view_meta(view_meta):
sql_ws_norm = re.sub(r'[\n\s]+', ' ', view_meta.sql.strip('; '))
return view_meta.name, (sql_ws_norm
.replace('`peewee_test`.', '')
.replace('`notes`.', '')
.replace('`', ''))
def assertViews(expected):
# Create two sample views.
self.database.execute_sql('CREATE VIEW notes_public AS '
'SELECT content, ts FROM notes '
'WHERE status = 1 ORDER BY ts DESC')
self.database.execute_sql('CREATE VIEW notes_deleted AS '
'SELECT content FROM notes '
'WHERE status = 9 ORDER BY id DESC')
try:
views = self.database.get_views()
normalized = sorted([normalize_view_meta(v) for v in views])
self.assertEqual(normalized, expected)
# Ensure that we can use get_columns to introspect views.
columns = self.database.get_columns('notes_deleted')
self.assertEqual([c.name for c in columns], ['content'])
columns = self.database.get_columns('notes_public')
self.assertEqual([c.name for c in columns], ['content', 'ts'])
finally:
self.database.execute_sql('DROP VIEW notes_public;')
self.database.execute_sql('DROP VIEW notes_deleted;')
# Unfortunately, all databases seem to represent VIEW definitions
# differently internally.
if IS_SQLITE:
assertViews([
('notes_deleted', ('CREATE VIEW notes_deleted AS '
'SELECT content FROM notes '
'WHERE status = 9 ORDER BY id DESC')),
('notes_public', ('CREATE VIEW notes_public AS '
'SELECT content, ts FROM notes '
'WHERE status = 1 ORDER BY ts DESC'))])
elif IS_MYSQL:
assertViews([
('notes_deleted',
('select content AS content from notes '
'where status = 9 order by id desc')),
('notes_public',
('select content AS content,ts AS ts from notes '
'where status = 1 order by ts desc'))])
elif IS_POSTGRESQL:
assertViews([
('notes_deleted',
('SELECT notes.content FROM notes '
'WHERE (notes.status = 9) ORDER BY notes.id DESC')),
('notes_public',
('SELECT notes.content, notes.ts FROM notes '
'WHERE (notes.status = 1) ORDER BY notes.ts DESC'))])
elif IS_CRDB:
assertViews([
('notes_deleted',
('SELECT content FROM peewee_test.public.notes '
'WHERE status = 9 ORDER BY id DESC')),
('notes_public',
('SELECT content, ts FROM peewee_test.public.notes '
'WHERE status = 1 ORDER BY ts DESC'))])
@requires_models(User, Tweet, Category)
def test_get_foreign_keys(self):
foreign_keys = self.database.get_foreign_keys('tweet')
data = [(fk.column, fk.dest_table, fk.dest_column, fk.table)
for fk in foreign_keys]
self.assertEqual(data, [
('user_id', 'users', 'id', 'tweet')])
foreign_keys = self.database.get_foreign_keys('category')
data = [(fk.column, fk.dest_table, fk.dest_column, fk.table)
for fk in foreign_keys]
self.assertEqual(data, [
('parent_id', 'category', 'name', 'category')])
class TestSortModels(BaseTestCase):
def test_sort_models(self):
class A(Model):
pass
class B(Model):
a = ForeignKeyField(A)
class C(Model):
b = ForeignKeyField(B)
class D(Model):
c = ForeignKeyField(C)
class E(Model):
pass
models = [A, B, C, D, E]
for list_of_models in permutations(models):
sorted_models = sort_models(list_of_models)
self.assertEqual(sorted_models, models)
class TestDBProxy(BaseTestCase):
def test_proxy_context_manager(self):
db = Proxy()
class User(Model):
username = TextField()
class Meta:
database = db
self.assertRaises(AttributeError, User.create_table)
sqlite_db = SqliteDatabase(':memory:')
db.initialize(sqlite_db)
User.create_table()
with db:
self.assertFalse(db.is_closed())
self.assertTrue(db.is_closed())
def test_db_proxy(self):
db = Proxy()
class BaseModel(Model):
class Meta:
database = db
class User(BaseModel):
username = TextField()
class Tweet(BaseModel):
user = ForeignKeyField(User, backref='tweets')
message = TextField()
sqlite_db = SqliteDatabase(':memory:')
db.initialize(sqlite_db)
self.assertEqual(User._meta.database.database, ':memory:')
self.assertEqual(Tweet._meta.database.database, ':memory:')
self.assertTrue(User._meta.database.is_closed())
self.assertTrue(Tweet._meta.database.is_closed())
sqlite_db.connect()
self.assertFalse(User._meta.database.is_closed())
self.assertFalse(Tweet._meta.database.is_closed())
sqlite_db.close()
def test_proxy_decorator(self):
db = DatabaseProxy()
@db.connection_context()
def with_connection():
self.assertFalse(db.is_closed())
@db.atomic()
def with_transaction():
self.assertTrue(db.in_transaction())
@db.manual_commit()
def with_manual_commit():
self.assertTrue(db.in_transaction())
db.initialize(SqliteDatabase(':memory:'))
with_connection()
self.assertTrue(db.is_closed())
with_transaction()
self.assertFalse(db.in_transaction())
with_manual_commit()
self.assertFalse(db.in_transaction())
class Data(TestModel):
key = TextField()
value = TextField()
class Meta:
schema = 'main'
class TestAttachDatabase(ModelTestCase):
database = db_loader('sqlite3')
requires = [Data]
def test_attach(self):
database = self.database
Data.create(key='k1', value='v1')
Data.create(key='k2', value='v2')
# Attach an in-memory cache database.
database.attach(':memory:', 'cache')
# Clone data into the in-memory cache.
class CacheData(Data):
class Meta:
schema = 'cache'
self.assertFalse(CacheData.table_exists())
CacheData.create_table(safe=False)
self.assertTrue(CacheData.table_exists())
(CacheData
.insert_from(Data.select(), fields=[Data.id, Data.key, Data.value])
.execute())
# Update the source data.
query = Data.update({Data.value: Data.value + '-x'})
self.assertEqual(query.execute(), 2)
# Verify the source data was updated.
query = Data.select(Data.key, Data.value).order_by(Data.key)
self.assertSQL(query, (
'SELECT "t1"."key", "t1"."value" '
'FROM "main"."data" AS "t1" '
'ORDER BY "t1"."key"'), [])
self.assertEqual([v for k, v in query.tuples()], ['v1-x', 'v2-x'])
# Verify the cached data reflects the original data, pre-update.
query = (CacheData
.select(CacheData.key, CacheData.value)
.order_by(CacheData.key))
self.assertSQL(query, (
'SELECT "t1"."key", "t1"."value" '
'FROM "cache"."cache_data" AS "t1" '
'ORDER BY "t1"."key"'), [])
self.assertEqual([v for k, v in query.tuples()], ['v1', 'v2'])
database.close()
# On re-connecting, the in-memory database will be re-attached.
database.connect()
# The CacheData table does not exist in the fresh in-memory database.
self.assertFalse(CacheData.table_exists())
# Double-check the sqlite master table.
curs = database.execute_sql('select * from cache.sqlite_master;')
self.assertEqual(curs.fetchall(), [])
# Because it's in-memory, the table needs to be re-created.
CacheData.create_table(safe=False)
self.assertEqual(CacheData.select().count(), 0)
# Original data is still there.
self.assertEqual(Data.select().count(), 2)
def test_attach_detach(self):
database = self.database
Data.create(key='k1', value='v1')
Data.create(key='k2', value='v2')
# Attach an in-memory cache database.
database.attach(':memory:', 'cache')
curs = database.execute_sql('select * from cache.sqlite_master')
self.assertEqual(curs.fetchall(), [])
self.assertFalse(database.attach(':memory:', 'cache'))
self.assertRaises(OperationalError, database.attach, 'foo.db', 'cache')
self.assertTrue(database.detach('cache'))
self.assertFalse(database.detach('cache'))
self.assertRaises(OperationalError, database.execute_sql,
'select * from cache.sqlite_master')
def test_sqlite_schema_support(self):
class CacheData(Data):
class Meta:
schema = 'cache'
# Attach an in-memory cache database and create the cache table.
self.database.attach(':memory:', 'cache')
CacheData.create_table()
tables = self.database.get_tables()
self.assertEqual(tables, ['data'])
tables = self.database.get_tables(schema='cache')
self.assertEqual(tables, ['cache_data'])
class TestDatabaseConnection(DatabaseTestCase):
def test_is_connection_usable(self):
# Ensure a connection is open.
conn = self.database.connection()
self.assertTrue(self.database.is_connection_usable())
self.database.close()
self.assertFalse(self.database.is_connection_usable())
self.database.connect()
self.assertTrue(self.database.is_connection_usable())
@requires_postgresql
def test_is_connection_usable_pg(self):
self.database.execute_sql('drop table if exists foo')
self.database.execute_sql('create table foo (data text not null)')
self.assertTrue(self.database.is_connection_usable())
with self.assertRaises(IntegrityError):
self.database.execute_sql('insert into foo (data) values (NULL)')
self.assertFalse(self.database.is_closed())
self.assertFalse(self.database.is_connection_usable())
self.database.rollback()
self.assertTrue(self.database.is_connection_usable())
curs = self.database.execute_sql('select * from foo')
self.assertEqual(list(curs), [])
self.database.execute_sql('drop table foo')
class TestExceptionWrapper(ModelTestCase):
database = get_in_memory_db()
requires = [User]
def test_exception_wrapper(self):
exc = None
try:
User.create(username=None)
except IntegrityError as e:
exc = e
if exc is None: raise Exception('expected integrity error not raised')
self.assertTrue(exc.orig.__module__ != 'peewee')
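# --- Illustrative sketch (not part of the original test module) ---
# The commit counts asserted in test_batch_commit() above follow a simple
# ceiling rule: flushing n_objs items every batch_size iterations takes
# ceil(n_objs / batch_size) commits. This only sketches the expected
# arithmetic, not peewee's internal implementation.
for _n_objs, _batch_size, _n_commits in [(12, 1, 12), (12, 5, 3),
                                         (12, 7, 2), (12, 13, 1)]:
    assert -(-_n_objs // _batch_size) == _n_commits  # ceiling division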
|
sendmail.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import time
from multiprocessing import Process
import email
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
import smtplib
from django.conf import settings
class MailSender(object):
def __init__(self):
try:
self.MAIL_REVIEW_SMTP_SERVER = getattr(settings, 'MAIL_REVIEW_SMTP_SERVER')
self.MAIL_REVIEW_SMTP_PORT = int(getattr(settings, 'MAIL_REVIEW_SMTP_PORT'))
self.MAIL_REVIEW_FROM_ADDR = getattr(settings, 'MAIL_REVIEW_FROM_ADDR')
self.MAIL_REVIEW_FROM_PASSWORD = getattr(settings, 'MAIL_REVIEW_FROM_PASSWORD')
except AttributeError as a:
print("Error: %s" % a)
except ValueError as v:
print("Error: %s" % v)
def _format_addr(self, s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
def _send(self, strTitle, strContent, listToAddr):
msg = MIMEText(strContent, 'plain', 'utf-8')
# Message headers: sender, recipient(s) and subject
msg['From'] = self._format_addr(self.MAIL_REVIEW_FROM_ADDR)
#msg['To'] = self._format_addr(listToAddr)
msg['To'] = ','.join(listToAddr)
msg['Subject'] = Header(strTitle, "utf-8").encode()
server = smtplib.SMTP(self.MAIL_REVIEW_SMTP_SERVER, self.MAIL_REVIEW_SMTP_PORT)  # the default SMTP port is 25
#server.set_debuglevel(1)
# If the configured password is empty, there is no need to log in to the SMTP server
if self.MAIL_REVIEW_FROM_PASSWORD != '':
server.login(self.MAIL_REVIEW_FROM_ADDR, self.MAIL_REVIEW_FROM_PASSWORD)
sendResult = server.sendmail(self.MAIL_REVIEW_FROM_ADDR, listToAddr, msg.as_string())
server.quit()
# Callers should use this method: it sends the mail from a child process so that a hung mail service cannot block the main archer service
def sendEmail(self, strTitle, strContent, listToAddr):
p = Process(target=self._send, args=(strTitle, strContent, listToAddr))
p.start()
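# --- Illustrative sketch (not part of the original module) ---
# Demonstrates, with only the standard library, what _format_addr() above
# produces: the display name is RFC 2047-encoded when it contains
# non-ASCII characters, while the address part is left untouched. The
# name and address below are made-up examples.
if __name__ == '__main__':
    from email.header import Header
    from email.utils import formataddr, parseaddr

    name, addr = parseaddr('José Reviewer <[email protected]>')
    print(formataddr((Header(name, 'utf-8').encode(), addr)))
    # -> something like '=?utf-8?q?Jos=C3=A9_Reviewer?= <[email protected]>'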
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import re
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.request import urlopen
from runner import BrowserCore, path_from_root, has_browser, EMTEST_BROWSER
from runner import no_wasm_backend, create_test_file, parameterized, ensure_dir
from tools import building
from tools import shared
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE, V8_ENGINE
from tools.shared import try_delete
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
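# --- Illustrative sketch (not part of the original file) ---
# Standalone illustration of the byte-range arithmetic in do_GET() above:
# a "Range: bytes=start-end" value is parsed and the end is clamped to the
# payload, mirroring the slice the chunked server writes back. The helper
# name is hypothetical.
def _sketch_range_slice(range_value, payload):
    start, end = range_value.split("=")[1].split("-")
    start, end = int(start), min(len(payload) - 1, int(end))
    return payload[start:end + 1]


assert _sketch_range_slice('bytes=0-3', b'0123456789') == b'0123'
assert _sketch_range_slice('bytes=5-100', b'0123456789') == b'56789'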
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def requires_asmfs(f):
def decorated(self, *args, **kwargs):
# https://github.com/emscripten-core/emscripten/issues/9534
self.skipTest('ASMFS is looking for a maintainer')
return f(self, *args, **kwargs)
return decorated
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test verifies behavior that will be deprecated at some point in the future; remove it once
# system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-g4'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
self.compile_btest(['src.cpp', '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g4', '-o', 'page.html'])
self.run_browser('page.html', None, '/report_result?1')
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), '[email protected]').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
("[email protected]", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("[email protected]", "file.txt"),
("./[email protected]", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@[email protected]", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "[email protected]")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_test_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_test_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_test_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
create_test_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
create_test_file('src.cpp', self.with_report_result(open(os.path.join(path_from_root('tests/manual_download_data.cpp'))).read()))
create_test_file('file.txt', '''Hello!''')
self.compile_btest(['src.cpp', '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
self.run_process([PYTHON, FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt'))
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
if is_chrome() and extra_size >= 100 * 1024 * 1024:
continue
create_test_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
create_test_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([PYTHON, FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
ensure_dir(os.path.join('subdirr', 'moar'))
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
create_test_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
'''))
# by individual files
self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
    # by directory, and remove the local files to make sure the preloaded package is what gets used
self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
    # change the file package base dir to look in a "cdn" (a host-side sketch of this
    # routing rule follows this test). note that normally you would add this in your own
    # custom html file etc., and not by modifying the existing shell in this manner
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
'''))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
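  # Hedged illustration (not used by any test): a host-side sketch of the locateFile routing
  # rule injected into shell.html above -- .wasm files stay on the page's own prefix, while
  # other assets (such as the .data package) are fetched from a "cdn/" directory. The
  # function name and defaults are assumptions for illustration, not Emscripten API.
  @staticmethod
  def _sketch_locate_file(path, prefix='', cdn_prefix='cdn/'):
    # Mirror the JS: keep .wasm next to the page, route everything else to the CDN prefix.
    if path.endswith('.wasm'):
      return prefix + path
    return cdn_prefix + path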
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_test_file('data.txt', 'data')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
          // This code should never be executed, because the required dependency file is missing.
REPORT_RESULT(0);
return 0;
}
'''))
create_test_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
      # a missing data file should make xhr.onload fire with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
    # load an image file and get pixel data. Also -O2 coverage for --preload-file and memory-init.
    # The 'src@dst' form below maps a host file to a different VFS path (a parse sketch follows this test).
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
create_test_file('sdl_image.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
'sdl_image.c', '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
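  # Hedged illustration (not used by any test): --preload-file accepts a 'src@dst' form, as
  # used above ('screenshot.jpg@/assets/screenshot.jpg'), to place a host file at a different
  # path in the virtual filesystem. A minimal parse of that convention (it ignores the '@@'
  # escaping the real file packager also understands):
  @staticmethod
  def _sketch_split_preload_mapping(arg):
    # 'src@dst' -> (src, dst); a bare 'src' maps onto itself.
    if '@' in arg:
      src, dst = arg.split('@', 1)
      return src, dst
    return arg, arg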
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
create_test_file('sdl_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
self.compile_btest([
'sdl_image_jpeg.c', '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_test_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
@no_wasm_backend('This modifies JS code with regexes in such a way that does not currently work in WASM2JS')
def test_glgears_proxy(self):
    # we modify the asm.js, so this is a non-wasm test
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1', '-lGL', '-lglut', '-s', 'WASM=0'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
    # run normally (duplicates the above test, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
def copy(to, js_mod, html_mod=lambda x: x):
create_test_file(to + '.html', html_mod(open('test.html').read().replace('test.js', to + '.js')))
create_test_file(to + '.js', js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
    # run without noProxy (so we do proxy to a worker), but make the worker fail
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
    # test .js target with --proxy-to-worker; emits 2 js files, client and worker
self.compile_btest([path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_test_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def get_async_args(self):
return ['-s', 'ASYNCIFY']
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1'] + self.get_async_args()
]:
print(delay, defines, async_)
create_test_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
create_test_file('sdl_key.c', self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
self.compile_btest(['sdl_key.c', '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_test_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
        // Simulate sending the keypress event only when default handling of
        // the prior keydown event was not prevented.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
      // Send 'a'. Its keydown is not prevented, so the simulated keypress should be sent.
sendKey(65);
      // Send backspace. Default handling of the keydown should be prevented, so no keypress should be sent.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
create_test_file('sdl_text.c', self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
self.compile_btest(['sdl_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
self.compile_btest(['sdl_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
self.compile_btest(['sdl_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
self.compile_btest(['sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
self.compile_btest(['sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
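  # Hedged illustration (not used by any test): the two joystick tests above spoof the two
  # shapes the Gamepad API has used for buttons -- plain numbers in the 2012 Working Draft,
  # and {pressed, value} objects in the Editor's Draft. A minimal normalizer over both forms
  # (the helper name is an assumption for illustration):
  @staticmethod
  def _sketch_normalize_gamepad_button(button):
    # Return (pressed, value) regardless of which representation was used.
    if isinstance(button, dict):
      return bool(button.get('pressed')), float(button.get('value', 0))
    return button != 0, float(button)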
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('test_glfw_joystick.c', self.with_report_result(open(path_from_root('tests', 'test_glfw_joystick.c')).read()))
self.compile_btest(['test_glfw_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
    # JavaScript code to check support for the WebGL context attributes we want to test
    # (request the attribute, create a context, then read the value back from the context attributes).
    # Tests still succeed when an attribute is not supported.
create_test_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
    # perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest('webgl_error.cpp', '0')
  # Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows the user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT=1', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS=1'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure', '1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
self.btest('write_file.cpp', '0', args=['-s', 'ENVIRONMENT=web', '-Os', '--closure', '1'])
@unittest.skip('Skipping due to https://github.com/emscripten-core/emscripten/issues/2770')
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['--shell-file', path_from_root('tests', 'test_fflush.html')])
def test_file_db(self):
secret = str(time.time())
create_test_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
create_test_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME=1', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME=1', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME=1'] + self.get_async_args()
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = self.get_async_args() + ['-s', 'EXIT_RUNTIME=1']
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_test_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
create_test_file('file1.txt', 'first')
ensure_dir('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
self.run_process([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_test_file('file1.txt', '0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
    # compress in emcc; -s LZ4=1 makes emcc pass the LZ4 option on to the file packager
    # (a rough compression-ratio illustration follows this test)
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'])
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'])
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2', '--closure', '1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
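  # Hedged illustration (not used by any test): the size asserts in test_fs_lz4fs_package
  # expect the packaged data to shrink to well under half its size because file1/file2 are
  # highly repetitive. A rough sketch of the same ratio check, using zlib only because it is
  # in the standard library -- the real packager uses LZ4, which compresses less aggressively.
  @staticmethod
  def _sketch_compression_ratio(data):
    import zlib
    # e.g. b'0123456789' * (1024 * 128) compresses to far below half its original size
    return len(zlib.compress(data)) / float(len(data))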
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_test_file('data.dat', ' ')
self.run_process([PYTHON, FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM=1'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2'] + self.get_async_args())
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'INITIAL_MEMORY=80MB'] + self.get_async_args())
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17', args=['-s', 'EXIT_RUNTIME=1'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file',
path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
self.compile_btest(['sdl_gl_read.c', '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL=1', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
create_test_file('test_egl.c', self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
self.compile_btest(['-O2', 'test_egl.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1')
def _test_egl_width_height_base(self, *args):
create_test_file('test_egl_width_height.c', self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
self.compile_btest(['-O2', 'test_egl_width_height.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD=1')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_test_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
for file_data in [1, 0]:
cmd = [EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
subprocess.check_call(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
self.compile_btest([path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
    checksum = zlib.adler32(data) & 0xffffffff # mask to an unsigned 32-bit value for consistency across Python versions (an incremental-adler32 sketch follows this test)
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for i in range(60):
try:
urlopen('http://localhost:11111')
break
      except Exception:
        print('(sleep for server)')
        time.sleep(1)
        # retries exhausted; re-raise the last connection error
        if i == 59:
          raise
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
# attempt to rmdir() files in use.
if WINDOWS:
time.sleep(2)
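  # Hedged illustration (not used by any test): the chunked XHR test above reports an adler32
  # checksum of data served in fixed-size chunks. A minimal sketch showing that feeding
  # zlib.adler32 chunk by chunk (passing the running value back in) matches the one-shot
  # checksum over the whole buffer; the helper name and chunk size are assumptions.
  @staticmethod
  def _sketch_chunked_adler32(data, chunk_size=1024):
    import zlib
    running = 1  # adler32's defined initial value
    for i in range(0, len(data), chunk_size):
      running = zlib.adler32(data[i:i + chunk_size], running)
    return running & 0xffffffff  # equals zlib.adler32(data) & 0xffffffff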
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
    # test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-s', 'USE_PTHREADS=1'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
self.emcc_args += ['-Wno-pointer-sign', '-Wno-int-conversion']
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.o'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.o'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.o'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-s', 'FULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_test_file('script1.js', '''
Module._set(456);
''')
create_test_file('file1.txt', 'first')
create_test_file('file2.txt', 'second')
setup()
self.run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'EXIT_RUNTIME=1']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [
[],
# test pthreads + AUTO_JS_LIBRARIES mode as well
['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'AUTO_JS_LIBRARIES=0']
]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
  # Covered by the dEQP test suite (we can remove this later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'RELOCATABLE=1'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS=1', '-s', 'USE_REGAL=1', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_test_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest('cubegeom_pre_vao_es.c', reference='cubegeom_pre_vao.png', args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_test_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure', '1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
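  # Build supp.cpp as a wasm side module and link it into the main module at runtime
  # via -s RUNTIME_LINKED_LIBS.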
def test_runtimelink(self):
main, supp = self.setup_runtimelink_test()
create_test_file('supp.cpp', supp)
self.compile_btest(['supp.cpp', '-o', 'supp.wasm', '-s', 'SIDE_MODULE=1', '-O2', '-s', 'EXPORT_ALL=1'])
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'RUNTIME_LINKED_LIBS=["supp.wasm"]', '-s', 'EXPORT_ALL=1'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
create_test_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_test_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_test_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
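  # Calling into compiled code via ccall, cwrap or a direct export before the runtime is ready
  # should abort when assertions are enabled; once the runtime is up (or still alive), the same
  # calls should succeed.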
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
          ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_test_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_test_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync startup, call too late')
create_test_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync, runtime still alive, so all good')
create_test_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode)
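  # cwrap itself should be safe to call from a pre-js, before the runtime is initialized
  # (the actual wrapped call presumably happens later, once the runtime is ready).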
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
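  # Build a separate worker with -s BUILD_AS_WORKER=1 and call into it from the main page.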
def test_worker_api(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'] + self.get_async_args())
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
@unittest.skip('non-fastcomp is deprecated and fails in 3.5')
def test_module(self):
self.compile_btest([path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORT_ALL=1'], expected='8')
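  # A side module preloaded with --use-preload-plugins should be pre-instantiated and made
  # available in Module['preloadedWasm'], so that dlopen() can pick it up.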
def test_preload_module(self):
create_test_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.compile_btest(['library.c', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'library.wasm', '-s', 'EXPORT_ALL=1'])
os.rename('library.wasm', 'library.so')
main = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
REPORT_RESULT(1);
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
REPORT_RESULT(2);
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
REPORT_RESULT(3);
return 3;
}
REPORT_RESULT(0);
return 0;
}
'''
self.btest(
main,
args=['-s', 'MAIN_MODULE=1', '--preload-file', '.@/', '-O2', '--use-preload-plugins', '-s', 'EXPORT_ALL=1'],
expected='0')
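  # Preload a data file so the native side can mmap it (mmap_file.c presumably verifies the
  # mapped contents).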
def test_mmap_file(self):
create_test_file('data.dat', 'data from the file ' + ('.' * 9000))
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])
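  # emrun's --system_info and --browser_info queries should report CPU and browser details
  # without emitting a Python traceback.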
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
# Deliberately named as test_zzz_emrun to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_emrun(self):
self.compile_btest([path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
    # We cannot run emrun from the temp directory the suite will clean up afterwards, since the
    # browser that is launched will have that directory as its startup directory. The browser will
    # not close as part of the test, which pins down the cwd on Windows and makes it impossible to
    # delete the directory. Therefore, switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
    # Verify that trying to pass an argument to the page without the `--` separator
    # generates an actionable error message.
err = self.expect_fail(args_base + ['--foo'])
self.assertContained('error: unrecognized arguments: --foo', err)
self.assertContained('remember to add `--` between arguments', err)
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941']
]:
args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
stdout = open(self.in_dir('stdout.txt'), 'r').read()
stderr = open(self.in_dir('stderr.txt'), 'r').read()
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
  # This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.compile_btest(['-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
@requires_threads
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], ['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5.c'), args=[] + opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
print(opts)
self.btest(path_from_root('tests', 'test_gamepad.c'), args=[] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')
@requires_graphics_hardware
  # Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), expected='0')
@requires_graphics_hardware
  # Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(path_from_root('tests', 'browser', 'html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')
def test_webgl2(self):
for opts in [
['-s', 'MIN_CHROME_VERSION=0'],
['-O2', '-g1', '--closure', '1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts, expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS=1'], expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
def test_html5_webgl_api(self):
for mode in [['-s', 'OFFSCREENCANVAS_SUPPORT=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'],
['-s', 'OFFSCREEN_FRAMEBUFFER=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'],
[]]:
self.btest(path_from_root('tests', 'html5_webgl.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + mode, expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
    # Tests that if we support WebGL 1 and 2, and WebGL2RenderingContext exists but context
    # creation fails, we can then manually try to create a WebGL1 context and succeed.
self.btest(path_from_root('tests', 'test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='1')
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest(path_from_root('tests', 'webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure', '1', '-lGL'], expected='0')
  # Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types work
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'mipmap-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=os.path.join('third_party', 'sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'mrt-emcc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'arraytex-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_wget(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=self.get_async_args())
def test_wget_data(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2'] + self.get_async_args())
def test_locate_file(self):
for wasm in [0, 1]:
print('wasm', wasm)
self.clear()
create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
'''))
create_test_file('data.txt', 'load me right before...')
create_test_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
self.run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)])
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_test_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
'''))
in_html('200')
@requires_graphics_hardware
def test_glfw3(self):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION=1'], ['-Os', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
    # load an image file and get pixel data; also -O2 coverage for --preload-file and the memory init file
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
create_test_file('sdl2_image.c', self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
'sdl2_image.c', '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
create_test_file('sdl2_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
self.compile_btest([
'sdl2_image_jpeg.c', '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
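  # Synthesize keydown/keypress/keyup DOM events from JS and check that SDL2 key handling sees
  # them (the expected value presumably encodes which events the C side observed).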
def test_sdl2_key(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
        // send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
create_test_file('sdl2_key.c', self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
self.compile_btest(['sdl2_key.c', '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
create_test_file('sdl2_text.c', self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
self.compile_btest(['sdl2_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
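  # Simulate mousedown/mouseup/mousemove DOM events positioned relative to the canvas and verify
  # that SDL2 reports the corresponding mouse events.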
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
self.compile_btest(['sdl2_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
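  # Same as test_sdl2_mouse, but the canvas sits inside an offset container in a custom page,
  # so coordinate translation must account for element offsets (-DTEST_SDL_MOUSE_OFFSETS).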
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
self.compile_btest(['sdl2_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS=1', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-g1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
create_test_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl2_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
self.compile_btest(['sdl2_gl_read.c', '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_test_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest('sdl2_misc.c', expected='1', args=['-s', 'USE_SDL=2'])
print('also test building to object files first')
src = open(path_from_root('tests', 'sdl2_misc.c')).read()
create_test_file('test.c', self.with_report_result(src))
self.run_process([EMCC, '-c', 'test.c', '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?1')
@requires_sound_hardware
def test_sdl2_mixer_wav(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2', '-s', 'INITIAL_MEMORY=33554432'])
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(path_from_root('tests', 'sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-s', 'USE_SDL=2',
'-s', 'USE_SDL_MIXER=2',
'-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
'-s', 'INITIAL_MEMORY=33554432'
])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2'] + self.get_async_args())
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=self.get_async_args() + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-g'])
def test_async_2(self):
    # Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
# than 40 stack frames being reported.
create_test_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js'] + self.get_async_args())
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling'] + self.get_async_args())
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'] + self.get_async_args())
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + self.get_async_args())
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts)] + self.get_async_args())
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1', '-lSDL'] + self.get_async_args(), timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os'] + self.get_async_args())
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=self.get_async_args())
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=self.get_async_args())
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=["sync_tunnel"]'],), # noqa
'response': (['-s', '[email protected]'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
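    # The 'response' variant above exercises emcc's response-file syntax for
    # settings ('-s [email protected]'): the setting value is read as JSON from
    # the named file, which is created below before compiling.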
if '@' in str(args):
create_test_file('filey.txt', '["sync_tunnel"]')
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', path_from_root('tests', 'browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS=1'])
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=["waka"]', '--profiling'])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
def test_minimal_runtime_modularize(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.html', '-s', 'MODULARIZE=1', '-s', 'MINIMAL_RUNTIME=1'])
self.run_browser('test.html', None, '/report_result?0')
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure', '1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
        if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
        HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause timeout
let promise = HelloWorld();
        if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'WASM_ASYNC_COMPILATION=0', '-s', 'SINGLE_FILE=1'] + args + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
def test_modularize_network_error(self):
test_c_path = path_from_root('tests', 'browser_test_hello_world.c')
browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')
self.compile_btest([test_c_path, '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message.slice(0, 54));
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?abort(both async and sync fetching of the wasm failed)')
def test_modularize_init_error(self):
test_cpp_path = path_from_root('tests', 'browser', 'test_modularize_init_error.cpp')
browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')
self.compile_btest([test_cpp_path, '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
if (typeof window === 'object') {
window.addEventListener('unhandledrejection', function(event) {
reportResultToServer("Unhandled promise rejection: " + event.reason.message);
});
}
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err);
});
</script>
''')
self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
@no_wasm_backend('cannot customize INITIAL_MEMORY in wasm at runtime')
def test_modularize_and_preload_files(self):
    # allocate a non-default amount of memory (32MB) for the emscripten heap
    totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
          // access INITIAL_MEMORY via a string index so that the test succeeds with the closure compiler enabled
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory
create_test_file('test.c', self.with_report_result(src))
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
self.run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
print('wasm in worker (we can read binary data synchronously there)')
create_test_file('pre.js', '''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '--proxy-to-worker', '-s', 'EXPORT_ALL=1'])
print('wasm (will auto-preload since no sync binary reading)')
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
def test_dylink_dso_needed(self):
self._run_dylink_dso_needed(0)
def test_dylink_dso_needed_inworker(self):
self._run_dylink_dso_needed(1)
def _run_dylink_dso_needed(self, inworker):
self.emcc_args += ['-O2']
def do_run(src, expected_output):
# XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
# -> do the assert about expected output inside browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# setup by the shell).
create_test_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
src += r'''
int main() {
test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
REPORT_RESULT(0);
}
''' % (expected_output,)
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
self.btest(src, '0', args=self.get_emcc_args() + ['--post-js', 'post.js'])
self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
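  # Test that the heap can grow while startup is still in progress: a 30MB data
  # file is preloaded into a heap that starts at only 16MB (with a small stack),
  # so ALLOW_MEMORY_GROWTH must kick in during startup.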
def test_memory_growth_during_startup(self):
create_test_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
create_test_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
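  # A minimal usage sketch (illustrative only, not an actual test in this suite):
  # prep_no_SAB() is meant to be paired with '--shell-file html.html' so that a
  # pthreads build is exercised in an environment where SharedArrayBuffer and
  # Atomics are unavailable, e.g.:
  #   self.prep_no_SAB()
  #   self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'),
  #              expected='0',
  #              args=['-s', 'USE_PTHREADS=1', '--shell-file', 'html.html'])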
# Test that the emscripten_ atomics api functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure', '1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_hardware_concurrency.cpp'), expected='0', args=['-O2', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS=1', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
  # Tests the remaining GCC atomics not covered by the two tests above.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'),
expected='0',
args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
    # TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-s', 'MINIMAL_RUNTIME=1'])
  # Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_preallocates_workers.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD=1'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
    self.btest(path_from_root('tests', 'pthread', 'test_large_pthread_allocation.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest(path_from_root('tests', 'pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
  @no_chrome('pthread_kill hangs the chrome renderer, and keeps subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
  # Stress test pthreads allocating memory that will call out to sbrk(), and the main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--closure', '1', '-s', 'ENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS=1'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
  # Test customizing the URL that the pthread worker script (test.worker.js) is loaded from, via Module.locateFile.
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
ensure_dir('cdn')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
'''))
    # Test that it is possible to define a "Module.locateFile(path, prefix)" function to locate where test.worker.js will be loaded from.
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html'])
shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html'])
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?1')
  # Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), it does not deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest(path_from_root('tests', 'pthread', 'call_async.c'), expected='1', args=['-s', 'USE_PTHREADS=1'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js'), '-s', 'EXPORTED_FUNCTIONS=[_main,_malloc]'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@requires_threads
def test_pthread_wake_all(self):
self.btest(path_from_root('tests', 'pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'INITIAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME=1'], also_asmjs=True)
# Test that STACK_BASE and STACK_MAX correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(path_from_root('tests', 'core', 'test_safe_stack.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB', '--pre-js', path_from_root('tests', 'pthread', 'test_safe_stack.js')])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.js')])
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'ASSERTIONS=1'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest(path_from_root('tests', 'core', 'test_main_thread_async_em_asm.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'ASSERTIONS=1'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_test_file('page.html',
open(path_from_root('tests', 'browser', 'test_em_asm_blocking.html')).read())
create_test_file('wasm.cpp',
self.with_report_result(
open(path_from_root('tests', 'browser', 'test_em_asm_blocking.cpp')).read()))
self.compile_btest(['wasm.cpp', '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# test atomicrmw i64
@no_wasm_backend('uses an asm.js .ll file')
@requires_threads
def test_atomicrmw_i64(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html', '-s', 'WASM=0'])
self.run_browser('test.html', None, '/report_result?0')
# Test that it is possible to send a signal via calling alarm(timeout), which in turn calls to the signal handler set by signal(SIGALRM, func);
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
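  # Test that a --proxy-to-worker build works when embedded in a plain,
  # user-provided HTML page rather than the generated shell HTML.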
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
@no_wasm_backend('mem init file')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js'] + opts)
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION=1'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, expect)
self.btest('binaryen_async.c', expected=str(expect), args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest('binaryen_async.c', expected='1', args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
def test_manual_wasm_instantiate(self):
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(os.path.join(path_from_root('tests/manual_wasm_instantiate.cpp'))).read()))
self.compile_btest(['src.cpp', '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN=1'])
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
ensure_dir('cdn')
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
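  # Test that the runtime UTF-8/UTF-16 string helpers (UTF8ToString,
  # UTF16ToString, stringToUTF16, lengthBytesUTF16) handle large embedded text
  # corpora correctly.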
def test_utf8_textdecoder(self):
self.btest('benchmark_utf8.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest('benchmark_utf16.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
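  # Test the -s TEXTDECODER=x setting: 0 ships only the JS decoder fallback,
  # 1 (the default) uses TextDecoder when available and keeps the fallback, and
  # 2 requires TextDecoder and drops the fallback entirely. The size comparisons
  # below check that dropping either path shrinks the generated JS.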
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure', '1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5496), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
    self.skipTest('This test is disabled because current OffscreenCanvas does not allow transferring it after a rendering context has been created for it.')
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
  # For testing WebGL draft extensions like this, if using Chrome as the browser,
  # we might need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env var.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
  # For testing WebGL draft extensions like this, if using Chrome as the browser,
  # we might need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env var.
  # If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
  # Also, there is a known bug with baseInstance on Mac Intel GPUs that can keep the expected image from being produced.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'OFFSCREEN_FRAMEBUFFER=1',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD']]:
      for version in [[], ['-s', 'FULL_ES2'], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest('webgl_draw_triangle.c', '0', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# VAO path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION=1'],
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
  # -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it by using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
  # -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD=1']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS=1', '-lGL', '-s', 'GL_DEBUG=1']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest('webgl2_simple_enable_extensions.c', expected='0', args=cmd)
# Tests the feature that shell html page can preallocate the typed array and place it to Module.buffer before loading the script page.
# In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
@no_wasm_backend('asm.js feature')
def test_preallocated_heap(self):
self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'] + arg,
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], also_asmjs=True)
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'INITIAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp', expected='1', args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
# In that case, append is implicitly understood.
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_implicit_append(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
ensure_dir('dirrey')
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
self.btest('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
self.btest('unistd/close.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
self.btest('unistd/access.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest('unistd/unlink.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
self.btest('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in single-threaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG=1', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'] + modularize + opts)
# Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_pthread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '-s', 'MINIMAL_RUNTIME=1']]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS=1'])
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_reltime.cpp'), expected='3', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
src = 'src.c'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'pthread', 'hello_thread.c')).read()))
self.compile_btest(['src.c', '-s', 'USE_PTHREADS=1', '-o', 'hello_thread_with_blob_url.js', '-s', 'WASM=0'])
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
REPORT_RESULT(0);
return 0;
}
'''
create_test_file('test.c', self.with_report_result(src))
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE=1'])
create_test_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE=1'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME=1', '-s', 'SINGLE_FILE=1', '-s', 'WASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE=1', '-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
for wasm_enabled in [True, False]:
args = ['src.cpp', '-o', 'test.js', '-s', 'SINGLE_FILE=1']
if not wasm_enabled:
args += ['-s', 'WASM=0']
self.compile_btest(args)
create_test_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE=1'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
create_test_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?0')
def test_access_file_after_heap_resize(self):
create_test_file('test.txt', 'hello from file')
create_test_file('page.c', self.with_report_result(open(path_from_root('tests', 'access_file_after_heap_resize.c'), 'r').read()))
self.compile_btest(['page.c', '-s', 'ALLOW_MEMORY_GROWTH=1', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation
self.run_process([PYTHON, FILE_PACKAGER, 'data.js', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.compile_btest(['page.c', '-s', 'ALLOW_MEMORY_GROWTH=1', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_test_file('main.cpp', self.with_report_result(r'''
int main() {
REPORT_RESULT(0);
return 0;
}
'''))
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print"]'])
# Tests that Emscripten-compiled applications can be run from a relative path in the browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
self.compile_btest(['test.c', '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
for args, creations in [
(['-s', 'MODULARIZE=1'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
# here we also eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE=1'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE=1'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_emscripten_set_immediate(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(path_from_root('tests', 'emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(path_from_root('tests', 'emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind'] + self.get_async_args())
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(path_from_root('tests', 'emscripten_console_log.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(path_from_root('tests', 'emscripten_throw_number.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(path_from_root('tests', 'emscripten_throw_string.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'WASM=0'] + minimal_runtime)
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'MINIMAL_RUNTIME=1'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE=1'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE=1']]:
for modularize in [[], ['-s', 'MODULARIZE=1']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION=1', '--closure', '1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION=1', '--closure', '1']]:
self.btest(path_from_root('tests', 'small_hello_world.c'), '0', args=args + ['-s', 'MINIMAL_RUNTIME=1'])
@requires_threads
def test_offset_converter(self, *args):
try:
self.btest(path_from_root('tests', 'browser', 'test_offset_converter.c'), '1', args=['-s', 'USE_OFFSET_CONVERTER', '-g4', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
except Exception as e:
# dump the wasm file; this is meant to help debug #10539 on the bots
print(self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), 'test.wasm', '-g', '--print', '-all'], stdout=PIPE).stdout)
raise e
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest(path_from_root('tests', 'browser', 'test_emscripten_unwind_to_js_event_loop.c'), '1', args=['-s', 'NO_EXIT_RUNTIME=1'])
def test_wasm2js_fallback(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'small_hello_world.c')).read()))
self.compile_btest([src, '-s', 'WASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
# Then disable WebAssembly support in the VM, and try again. Should still work with the Wasm2JS fallback.
html = open('test.html', 'r').read()
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
open('test.html', 'w').write(html)
os.remove('test.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'small_hello_world.c')).read()))
self.compile_btest([src, '-s', 'WASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
# Restore the .wasm.js file, then corrupt the .wasm file, which should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_system(self):
self.btest(path_from_root('tests', 'system.c'), '0')
# Tests that it is possible to hook into/override a symbol defined in a system library.
@requires_graphics_hardware
def test_override_system_js_lib_symbol(self):
# This test verifies it is possible to override a symbol from WebGL library.
# When WebGL is implicitly linked in, the implicit linking should happen before any user --js-libraries, so that they can adjust
# the behavior afterwards.
self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
expected='5121',
args=['--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])
# When WebGL is explicitly linked to in strict mode, the linking order on command line should enable overriding.
self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
expected='5121',
args=['-s', 'AUTO_JS_LIBRARIES=0', '-lwebgl.js', '--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4GB(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('tests', 'browser', 'test_4GB.cpp', js_engines=[V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_2GB_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('tests', 'browser', 'test_2GB_fail.cpp', js_engines=[V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4GB_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
self.do_run_in_out_file_test('tests', 'browser', 'test_4GB_fail.cpp', js_engines=[V8_ENGINE])
@unittest.skip("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-s', 'ASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
# the specific symptom of the hang that was fixed is that the test hangs
# at some point, using 0% CPU. Often that occurred in 0-200 iterations, but
# you may want to adjust "ITERATIONS".
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_hammer.cpp'),
expected='0',
args=['-s', 'USE_PTHREADS=1', '-O2', '-s', 'PROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
|
system.py
|
import threading
from mysql import connector
from config import *
import platform
import modules.core.database as database
import modules.core.extract as extract
import time
import speedtest #pip install speedtest-cli
import psutil #pip install psutil
import urllib.request
import subprocess
import socket
import os
import sys
class system_cls():
def __init__(self,update,context) -> None:
self.update = update
self.context = context
self.msg = None
self.user = None
self.tag_msg = None
self.tag_user = None
self.msg = update.message
self.user = user = self.msg['from_user']
self.chat = chat = self.msg['chat']
self.db = database.bot_db()
try:
self.tag_msg = tag_msg = update.message.reply_to_message
self.tag_user = tag_user = tag_msg['from_user']
self.db.add_user(user=tag_user)
except:
pass
self.db.parse(chat=chat, user=user)
self.chat_id = self.chat["id"]
self.msg_string = self.msg.text
def change_config(self):
pass
def sql_cmd_line(self,sql):
m = extract.sudo_check_2(msg=self.msg,del_lvl=8,context=self.context,sudo=1)
if m== 7:
pass
else: return
try:
x = self.db.cursor.execute(sql)
except Exception as x:
self.msg.reply_text("Error during Execution : \n\n" + str(x), parse_mode="HTML")
return
try:
rows = self.db.cursor.fetchall()
text = ",\n ".join(map(str, rows))
self.msg.reply_text(text, parse_mode="HTML")
except Exception as x:
try:
z = self.db.db.commit()
self.msg.reply_text("Committed !", parse_mode="HTML")
except Exception as y:
self.msg.reply_text( "Fetch Error : \n" + str(x) + "\n\nCommit Error: \n" + str(y), parse_mode="HTML")
def system_stat(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=8,context=self.context,sudo=1)
if m== 7:
pass
else: return
try:
s1 = "<code>Python v" + platform.python_version() + "</code>\n"
s2 = "<code>" + platform.system() + " v" + platform.version() + "</code>\n"
except:
s1 = s2 = ""
try:
s3 = "Ram Usage : <code>" + str(psutil.virtual_memory().percent) + "%</code>\n"
s4 = "Cpu Usage : <code>" + str(psutil.cpu_percent()) + "%</code>\n"
except:
s3 = s4 = ""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = str(s.getsockname()[0])
s.close()
s5 = "Local Ip : <code>" + ip + "</code>\n"
except:
s5 = ""
try:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
s6 = "External Ip : <code>" + external_ip + "</code>\n"
except:
s6 = ""
s7 = ""
try:
current_network = subprocess.check_output(['netsh', 'wlan', 'show', 'interfaces']).decode('utf-8').split('\n')
ssid_line = [x for x in current_network if 'SSID' in x and 'BSSID' not in x]
if ssid_line:
ssid_list = ssid_line[0].split(':')
connected_ssid = ssid_list[1].strip()
s7 = "Connected : <code>" + connected_ssid + "</code>\n"
except:
s7 = ""
text = s7 + s3 + s4 + s5 + s6 + s1 + s2
msg = self.msg.reply_text(text=text ,
parse_mode="HTML")
def bot_restart(self,quit=0):
m = extract.sudo_check_2(msg=self.msg,del_lvl=8,context=self.context,sudo=1)
if m== 7:
pass
else: return
if quit==1:
ms = self.msg.reply_text(text="Terminating !" ,
parse_mode="HTML")
#self.updater.stop()
sys.exit(1)
try:
ms = self.msg.reply_text(text="Stoping Current Instance..." ,
parse_mode="HTML")
#p = psutil.Process(os.getpid())
#for handler in p.open_files() + p.connections():
# os.close(handler.fd)
except Exception as e:
self.msg.reply_text(text=str(e) ,
parse_mode="HTML")
ms.edit_text(text="Restarting in 5 Seconds.." ,
parse_mode="HTML")
time.sleep(5)
ms.edit_text(text="Restarting" ,
parse_mode="HTML")
python = sys.executable
os.execl(python, python, *sys.argv)
def pc_restart(self,shut=0):
m = extract.sudo_check_2(msg=self.msg,del_lvl=8,context=self.context,sudo=1)
if m== 7:
pass
else: return
if shut == 1:
self.msg.reply_text(text="Shutting Down Server..." ,
parse_mode="HTML")
os.system("shutdown /s /t 1")
else:
self.msg.reply_text(text="Restarting Server..." ,
parse_mode="HTML")
os.system("shutdown /r /t 1")
def system_cmd_line(self,line=""):
# run the full command string through a shell so multi-word commands work
output = subprocess.Popen(line, shell=True, stdout=subprocess.PIPE)
r = output.communicate()
self.msg.reply_text(text=str(r) ,
parse_mode="HTML")
def activity_log_file(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=8,context=self.context,sudo=1)
if m== 7 or m==8:
pass
else: return
path = str(os.path.dirname(os.path.dirname(sys.argv[0])))
wp1= path + '/logs/log_bot_runtime.log'
try:
self.context.bot.send_document(chat_id=self.chat_id, document=open(wp1, 'rb'), filename="bot_runtime.log")
except Exception as x:
self.msg.reply_text(text="<code>Bot : " + str(x) + "</code>",
parse_mode="HTML")
wp2= path + '/logs/log_sql_runtime.log'
try:
self.context.bot.send_document(chat_id=self.chat_id, document=open(wp2, 'rb'), filename="sql_server.log")
except Exception as y:
self.msg.reply_text(text="<code>Sql : " + str(y) + "</code>",
parse_mode="HTML")
"""
file = context.bot.getFile(update.message.audio.file_id)
file.download('./voice.ogg')
def downloader(update, context):
context.bot.get_file(update.message.document).download()
# writing to a custom file
with open("custom/file.doc", 'wb') as f:
context.bot.get_file(update.message.document).download(out=f)
"""
def net(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=8,context=self.context,sudo=1)
if m== 7 or m==8:
pass
else: return
msg = self.msg.reply_text(text="<code>" + "Connecting..." + "</code>",
parse_mode="HTML")
st = speedtest.Speedtest(secure=True)
st.get_best_server()
msg.edit_text(text="<code>" + "Checking download speed..." + "</code>",
parse_mode="HTML")
st.download()
# speedtest results are reported in bits/s; convert to megabits per second
bits_to_mbps = 1. / 1000000
d = st.results.download
d = bits_to_mbps * d
d = round(d, 2)
ds = "Download Speed : " + str(d) + " Mbps"
msg.edit_text(text=("<code>" + ds + "\n\nChecking upload speed..." + "</code>"),
parse_mode="HTML")
st.upload()
u = st.results.upload
u = bits_to_mbps * u
u = round(u, 2)
us = "\nUpload Speed : " + str(u) + " Mbps"
servernames = []
msg.edit_text(text=("<code>" + ds + us + "\n\nMeasuring ping..." + "</code>"),
parse_mode="HTML")
st.get_servers(servernames)
p = str(st.results.ping)
ps = "\nPing : " + p + "ms"
msg.edit_text(text=("<code>" + "Test Time : " + time.strftime("%Y-%m-%d (%H:%M:%S)") + "\n\n" + ds + us + ps + "</code>"),
parse_mode="HTML")
#print("--net speed *(D:"+str(d)+"mb/s, U:" + +str(u) + "mb/s, P:"+str(p)+"ms)")
def publish(self,pub=""):
cha = self.db.get_chat()
for x,y in enumerate(cha):
try:
self.context.bot.send_message(y[0], pub)
except:
pass
if x == 0:
break
def rel(self,pub=""):
cha = self.db.get_chat()
for x,y in enumerate(cha):
try:
self.context.bot.send_message(y[0], pub)
except:
pass
if x == 0:
break
def router(self):
res = self.msg_string.split(None,1)
if res[0] == "/net":
self.net()
elif res[0] == "/publish":
self.publish(res[1])
elif res[0] == "/sql":
self.sql_cmd_line(res[1])
elif res[0] == "/system":
try:
if res[1] == "stat":
self.system_stat()
elif res[1] == "log":
self.activity_log_file()
elif res[1] == "restart":
self.bot_restart()
elif res[1] == "quit":
self.bot_restart(quit=1)
else:
self.system_stat()
except Exception as x:
print(str(x))
self.system_stat()
elif res[0] == "/cmd":
self.system_cmd_line(res[1])
elif res[0] == "/server":
if res[1] == "restart":
self.pc_restart(shut=0)
elif res[1] == "shutdown":
self.pc_restart(shut=1)
def system_threading(update, context):
threading.Thread(target=system_cls(update,context).router, args=(), daemon=True).start()
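# Example wiring (a minimal sketch, not part of the original module): registering
# system_threading as a command handler with a python-telegram-bot v13-style
# Updater/Dispatcher. The bot token placeholder and the use of Filters.command are
# assumptions for illustration; the real bot presumably registers this handler in
# its own entry point.
if __name__ == "__main__":
    from telegram.ext import Updater, MessageHandler, Filters

    updater = Updater("BOT_TOKEN_HERE", use_context=True)  # hypothetical placeholder token
    # Route every command message ("/net", "/system", "/sql", ...) to system_threading,
    # which spawns a daemon thread per update and dispatches inside system_cls.router().
    updater.dispatcher.add_handler(MessageHandler(Filters.command, system_threading))
    updater.start_polling()
    updater.idle()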
|
cpu_gpu_profiler.py
|
import os
import signal
import time
import subprocess
import pandas as pd
from collections import defaultdict
from threading import Event, Thread
import psutil
from .errors import CommandExecutionError
# redirect the GPU memory usage to a file
#GPU_MONITOR = "nvidia-smi --query-gpu=index,memory.used --format=csv -lms 500 -f output.csv"
GPU_MONITOR = "nvidia-smi --query-gpu=index,memory.used --format=csv -l 120 -f output.csv"
def gpu_memory_usage_extract(file_name, ret_dict, num_gpus):
"""Extract GPU usage from the nvidia-smi output file"""
try:
gpu_memory_usage = pd.read_csv(file_name)
except pd.errors.EmptyDataError:
print("Error! The GPU profiling output is empty!")
raise
# filter out break line
gpu_memory_usage = gpu_memory_usage[~pd.isnull(gpu_memory_usage.iloc[:, 1])]
gpu_memory_usage = gpu_memory_usage[gpu_memory_usage.iloc[:, 1].str.contains('MiB')]
# use a defaultdict to collect per-gpu mean, std and max usage; the default value is an empty list
record = defaultdict(list)
# compute the average memory usage on each gpu
for i in range(num_gpus):
gpu_usage_i = gpu_memory_usage[gpu_memory_usage.iloc[:,0] == i].iloc[:,1]
gpu_usage_i = gpu_usage_i.str.extract(r'(\d+)', expand=False).astype(int)
mean_use_gpu_i = gpu_usage_i.mean()
# when the data contains only a single sample, Series.std() returns NaN
std_use_gpu_i = gpu_usage_i.std() if pd.notna(gpu_usage_i.std()) else 0.0
max_use_gpu_i = gpu_usage_i.max()
record['mean_usage'].append(mean_use_gpu_i)
record['std_usage'].append(std_use_gpu_i)
record['max_usage'].append(max_use_gpu_i)
if len(record) == 0:
return
ret_dict['gpu_memory_usage_mean'] = float(sum(record['mean_usage'])) / len(record['mean_usage'])
ret_dict['gpu_memory_usage_std'] = float(sum(record['std_usage'])) / len(record['std_usage'])
ret_dict['gpu_memory_usage_max'] = float(sum(record['max_usage'])) / len(record['max_usage'])
os.remove(file_name)
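# For reference (illustrative, not part of the original module): output.csv is the
# CSV written by the GPU_MONITOR command above, roughly of the form
#
#     index, memory.used [MiB]
#     0, 1024 MiB
#     1, 512 MiB
#     0, 1100 MiB
#     1, 530 MiB
#
# i.e. one row per GPU per sampling interval; blank lines and rows without a 'MiB'
# value are filtered out before the per-GPU aggregation above.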
def get_cpu_mem_usage_from_process(pid, cpu_usage):
"""Get CPU memory usage given process id, result append to a mutable list."""
if not psutil.pid_exists(pid):
return
proc = psutil.Process(pid)
if proc.is_running():
# rss (Resident Set Size) is the process's allocated memory in bytes; convert to KB before recording.
cpu_usage.append(proc.memory_info().rss / 1024)
class RepeatedQuery:
"""Use another a thread to repeatly execute a given function at a given time interval"""
def __init__(self, interval, function, *args, **kwargs):
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.start = time.time()
self.event = Event()
self.thread = Thread(target=self._target)
self.thread.start()
def _target(self):
while not self.event.wait(self._time):
self.function(*self.args, **self.kwargs)
@property
def _time(self):
return self.interval - ((time.time() - self.start) % self.interval)
def stop(self):
self.event.set()
self.thread.join()
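# Usage sketch (illustrative only, not part of the original module): RepeatedQuery
# starts sampling on construction and keeps calling `function` roughly every
# `interval` seconds until stop() is called, e.g.
#
#     samples = []
#     sampler = RepeatedQuery(5, get_cpu_mem_usage_from_process,
#                             pid=os.getpid(), cpu_usage=samples)
#     time.sleep(12)   # collects ~2 samples of this process's RSS (in KB)
#     sampler.stop()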
class Profiler(object):
"""The CPU GPU memory profiler"""
def __init__(self, ret_dict, num_gpus, process_id):
self.__ret_dict = ret_dict
self.num_gpus = num_gpus
self.cpu_usage = []
self.cpu_mem_repeat_query = RepeatedQuery(
interval=5,
function=get_cpu_mem_usage_from_process,
pid=process_id,
cpu_usage=self.cpu_usage
)
def __enter__(self):
if self.num_gpus < 1:
return self
open("output.csv", 'a').close()
self.__gpu_monitor_process = subprocess.Popen(
GPU_MONITOR,
shell=True,
preexec_fn=os.setsid
)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.cpu_mem_repeat_query.stop()
if len(self.cpu_usage) == 0:
raise CommandExecutionError
cpu_usage = sum(self.cpu_usage) / len(self.cpu_usage)
self.__ret_dict['cpu_memory_usage'] = cpu_usage
if self.num_gpus < 1:
return
os.killpg(os.getpgid(self.__gpu_monitor_process.pid), signal.SIGTERM)
# to solve race condition
time.sleep(1)
gpu_memory_usage_extract(
file_name="output.csv",
ret_dict=self.__ret_dict,
num_gpus=self.num_gpus
)
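# Usage sketch (illustrative only, not part of the original module): profile the
# resident memory of a short-lived child process with GPU monitoring disabled
# (num_gpus=0). Assumes a Unix-like system providing the `sleep` command, and that
# the module is run inside its package (e.g. `python -m <package>.cpu_gpu_profiler`),
# since the relative import of CommandExecutionError above needs a parent package.
if __name__ == "__main__":
    results = {}
    # The child outlives one sampling interval (5 s) so at least one CPU memory
    # sample is collected before the context manager exits.
    child = subprocess.Popen(["sleep", "12"])
    with Profiler(results, num_gpus=0, process_id=child.pid):
        child.wait()
    print(results)  # e.g. {'cpu_memory_usage': <average RSS in KB>}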
|