source (string, 3–86 chars) | python (string, 75–1.04M chars)
---|---
motion_udp.py
|
import bpy
import socket
import numpy as np
import time
#import matplotlib.pyplot as plt
import sys
dir = 'C:\\Users\\tom\\Desktop\\blSim'
sys.path.append(dir)
import struct
import threading
from gen_curve_2 import *
print('Initializing UDP socket...')
UDP_IP = "127.0.0.1"
UDP_PORT = 25000
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((UDP_IP,UDP_PORT))
print ('Socket initialized')
print('Initializing Blender...')
try:
draw()
except:
print('error init. road')
Tf = bpy.data.objects.get('Cube_1')
Tr = bpy.data.objects.get('Cube_2')
c = (Tf,Tr)
print('Blender initialized')
def read_udp(obj):
while True:
data,addr = sock.recvfrom(1024)
d_unpk = struct.unpack('d'*14,data)
print("msg:",d_unpk , " addr:", addr)
x_1 = d_unpk[0]
y_1 = d_unpk[2]
yaw_1 = d_unpk[4]
x_2 = d_unpk[1]
y_2 = d_unpk[3]
yaw_2 = d_unpk[5]
z_1 = d_unpk[12]
z_2 = d_unpk[13]
obj[0].location.x = x_1
obj[0].location.y = y_1
obj[0].location.z = z_1
obj[0].rotation_euler[2] = yaw_1*np.pi/180 # yaw received in degrees; use numpy's pi explicitly
obj[1].location.x = x_2
obj[1].location.y = y_2
obj[1].location.z = z_2
obj[1].rotation_euler[2] = yaw_2*np.pi/180 # yaw received in degrees; use numpy's pi explicitly
t = threading.Thread(target = read_udp, args= (c,))
try:
t.start()
except:
print('threading error')
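# --- Hedged sender-side sketch (not part of the original file) ----------------
# read_udp() above unpacks 14 doubles per datagram: indices 0-5 carry x/y/yaw for
# the two cubes and indices 12-13 carry their z positions. The meaning of
# indices 6-11 is not shown here, so this sketch simply zero-fills them.
import socket, struct

def send_pose(x1, x2, y1, y2, yaw1_deg, yaw2_deg, z1, z2,
              ip="127.0.0.1", port=25000):
    payload = [x1, x2, y1, y2, yaw1_deg, yaw2_deg] + [0.0] * 6 + [z1, z2]
    msg = struct.pack('d' * 14, *payload)  # 14 doubles = 112 bytes per datagram
    out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    out.sendto(msg, (ip, port))
    out.close()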
|
polybeast_env.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing as mp
import threading
import time
import numpy as np
import libtorchbeast
from torchbeast import atari_wrappers
# yapf: disable
parser = argparse.ArgumentParser(description='Remote Environment Server')
parser.add_argument("--pipes_basename", default="unix:/tmp/polybeast",
help="Basename for the pipes for inter-process communication. "
"Has to be of the type unix:/some/path.")
parser.add_argument('--num_servers', default=4, type=int, metavar='N',
help='Number of environment servers.')
parser.add_argument('--env', type=str, default='PongNoFrameskip-v4',
help='Gym environment.')
# yapf: enable
class Env:
def reset(self):
print("reset called")
return np.ones((4, 84, 84), dtype=np.uint8)
def step(self, action):
frame = np.zeros((4, 84, 84), dtype=np.uint8)
return frame, 0.0, False, {} # First three mandatory.
def create_env(env_name, lock=threading.Lock()):
with lock: # Atari isn't threadsafe at construction time.
return atari_wrappers.wrap_pytorch(
atari_wrappers.wrap_deepmind(
atari_wrappers.make_atari(env_name),
clip_rewards=False,
frame_stack=True,
scale=False,
)
)
def serve(env_name, server_address):
init = Env if env_name == "Mock" else lambda: create_env(env_name)
server = libtorchbeast.Server(init, server_address=server_address)
server.run()
def main(flags):
if not flags.pipes_basename.startswith("unix:"):
raise Exception("--pipes_basename has to be of the form unix:/some/path.")
processes = []
for i in range(flags.num_servers):
p = mp.Process(
target=serve, args=(flags.env, f"{flags.pipes_basename}.{i}"), daemon=True
)
p.start()
processes.append(p)
try:
# We are only here to listen to the interrupt.
while True:
time.sleep(10)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
flags = parser.parse_args()
main(flags)
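# Hedged usage note (not part of the original file): with libtorchbeast installed,
# the environment servers can be launched from the command line, e.g.
#   python polybeast_env.py --num_servers 2 --env Mock
# which serves the mock Env defined above on "unix:/tmp/polybeast.0" and ".1";
# any other --env value is wrapped through the Atari helpers in create_env().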
|
precompute_guides_msgpack_CFD.py
|
import msgpack
import json
import pickle
import os.path
from Queue import PriorityQueue
import re
import doench_score
import azimuth.model_comparison
import numpy as np
import pandas as pd
import csv
from intervaltree import IntervalTree
from multiprocessing import Process
import os
import time
start_time = time.time()
#Reverse complements a given string
def revcom(s):
basecomp = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A','U':'A', 'N':'N'}
letters = list(s[::-1])
letters = [basecomp[base] for base in letters]
return ''.join(letters)
class GuideRNA():
"""Holder of gRNA information"""
def __init__(self, selected, start, seq, PAM, seq_before, seq_after, chrom, cut_pos, score, exon_ranking, ensembl_gene, gene_name, functional_domain, has_exome_repeat, off_target_score):
self.start = start
self.seq = seq
self.PAM = PAM
self.seq_before = seq_before # 10bp before the sgRNA
self.seq_after = seq_after # 10bp after the sgRNA
self.chrom = chrom
self.cut_pos = cut_pos
self.score = score
self.exon_ranking = exon_ranking
self.ensembl_gene = ensembl_gene
self.gene_name = gene_name
self.selected = selected
self.functional_domain = functional_domain
if functional_domain:
self.has_functional_domain = True
else:
self.has_functional_domain = False
self.has_exome_repeat = has_exome_repeat
self.off_target_score = off_target_score
if off_target_score == 'inf':
self.off_target_score = 10000
def serialize_for_display(self):
"""Serialize for the way we are returning json"""
serialization = {
"score": self.score,
"start": self.start,
"seq": self.seq,
"PAM": self.PAM,
"seq_before": self.seq_before,
"seq_after": self.seq_after,
"chrom": self.chrom,
"cut_pos": self.cut_pos,
"selected": self.selected,
"has_exome_repeat": self.has_exome_repeat,
"off_target_score": self.off_target_score,
"has_functional_domain": self.has_functional_domain
}
if self.functional_domain != None:
serialization["functional_domain"] = self.functional_domain
return serialization
def cmp_scheme(self, g):
return (-g.off_target_score, g.has_functional_domain, g.score)
def __cmp__(self, other):
return cmp(self.cmp_scheme(self), self.cmp_scheme(other))
params = {
"PAM": "NGG",
"protospacer_len": 20,
"prime5": True,
"scoring": "Azimuth",
"quantity": 100,
"functional_domains": False,
"mer_len": 20
}
# azimuth model
print "loading azimuth models", time.time() - start_time
azimuth_saved_model_dir = os.path.join(os.path.dirname(azimuth.__file__), 'saved_models')
model_name = 'V3_model_full.pickle'
azimuth_model_file = os.path.join(azimuth_saved_model_dir, model_name)
with open(azimuth_model_file, 'rb') as f:
azimuth_model = pickle.load(f)
azimuth_scores_file = 'azimuth_scores.p'
with open(azimuth_scores_file, 'rb') as inp:
azimuth_scores = pickle.load(inp)
def get_azimuth_score(mer30):
if mer30 in azimuth_scores:
return azimuth_scores[mer30]
else:
score = azimuth.model_comparison.predict(np.array([mer30]), aa_cut=None, percent_peptide=None, model=azimuth_model, model_file=azimuth_model_file)[0]
print "generating Azimuth", mer30, score
azimuth_scores[mer30] = score
return score
# load in exome
APP_STATIC = "/home/joshm/GUIDES/CRISPR-Library-Designer/static"
exome_seq_path = os.path.join(APP_STATIC, 'data', 'GRCh37_exons')
mer_len = params['mer_len']
# process kmers
# consider all kmers which are followed by NGG
print "preparing hum kmers", time.time() - start_time
exome_mers = {}
for file in os.listdir(exome_seq_path):
file_loc = os.path.join(exome_seq_path, file)
with open(file_loc, 'r') as file_data:
fwdseq = file_data.read()
revseq = revcom(fwdseq)
for seq in [fwdseq, revseq]:
for i in range(len(seq) - mer_len - 2):
s = seq[i: i + mer_len]
if seq[i + mer_len + 1 : i + mer_len + 3] != "GG": # only PAMs
continue
if 'N' in s:
continue
if s in exome_mers:
exome_mers[s] += 1
else:
exome_mers[s] = 1
print 'len(exome_mers) = ', len(exome_mers), time.time() - start_time
# takes in a protospacer sequence (string)
# returns whether it occurs more than once in the exome (followed by NGG)
def hasExomeRepeat(protospacer):
guide_seq = protospacer[-mer_len:] # get PAM-proximal mer_len bases
hits = exome_mers.get(guide_seq, 0) # how many times it occurs in the exome followed by NGG
return hits >= 2
# loading CFD preprocessed
#Unpickle mismatch scores and PAM scores
def get_mm_pam_scores():
try:
mm_scores = pickle.load(open('mismatch_score.pkl','rb'))
pam_scores = pickle.load(open('pam_scores.pkl','rb'))
return (mm_scores,pam_scores)
except:
raise Exception("Could not find file with mismatch scores or PAM scores")
#Calculates CFD score
def calc_cfd(wt,sg,pam):
mm_scores,pam_scores = get_mm_pam_scores()
score = 1
sg = sg.replace('T','U')
wt = wt.replace('T','U')
s_list = list(sg)
wt_list = list(wt)
for i,sl in enumerate(s_list):
if wt_list[i] == sl:
score*=1
else:
key = 'r'+wt_list[i]+':d'+revcom(sl)+','+str(i+1)
score*= mm_scores[key]
score*=pam_scores[pam]
return (score)
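# Illustrative call (assumes mismatch_score.pkl and pam_scores.pkl are present):
#   calc_cfd("ACGTACGTACGTACGTACGT", "ACGTACGTACGTACGTACGA", "GG")
# multiplies per-position mismatch penalties with the PAM score; a perfect match
# against an NGG PAM should come out at (or very near) 1.0.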
def get_pot_off_targets(seq):
seq_list = list(seq)
backup_seq_list = list(seq)
nts = ['A','T','C','G']
results = {}
for a in range(len(seq)):
for a_sym in nts:
seq_list[a] = a_sym
for b in range(a + 1, len(seq)):
for b_sym in nts:
seq_list[b] = b_sym
for c in range(b + 1, len(seq)):
for c_sym in nts:
seq_list[c] = c_sym
new_seq = ''.join(seq_list)
results[new_seq] = True
seq_list[c] = backup_seq_list[c]
seq_list[b] = backup_seq_list[b]
seq_list[a] = backup_seq_list[a]
if seq in results:
del results[seq]
return results.keys()
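# For a 20-nt protospacer this enumerates every sequence within Hamming distance 3
# of the input: C(20,1)*3 + C(20,2)*3**2 + C(20,3)*3**3 = 32550 candidate off-targets.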
# load preprocessed info
with open("off_target_scores.p", "rb") as inp:
off_target_scores = pickle.load(inp)
print 'len(off_target_scores) = ', len(off_target_scores), time.time() - start_time
def get_off_target_score(protospacer):
if hasExomeRepeat(protospacer):
return 100000
if not protospacer in off_target_scores:
score = 0
off_targets = get_pot_off_targets(protospacer)
for off_target in off_targets:
if off_target in exome_mers:
wt = protospacer + "CGG"
sg = off_target
pam = "GG"
score += exome_mers[off_target] * calc_cfd(wt, sg, pam)
off_target_scores[protospacer] = score
return off_target_scores[protospacer]
# Create interval tree for functional domains
print "constructing interval tuples", time.time() - start_time
interval_tuples_dict = {}
ucsc_pfam_f = '../functional_domains/ucsc_pfam.txt'
with open(ucsc_pfam_f, 'r') as pfam_csv:
csvreader = csv.reader(pfam_csv, delimiter='\t')
next(csvreader) # skip header
for row in csvreader:
chrom = row[1]
start = row[2]
end = row[3]
name = row[4]
if chrom not in interval_tuples_dict:
interval_tuples_dict[chrom] = []
new_tuple = (int(start), int(end), name)
interval_tuples_dict[chrom].append(new_tuple)
print "constructing interval trees", time.time() - start_time
interval_trees_dict = {}
for k, v in interval_tuples_dict.iteritems():
interval_trees_dict[k] = IntervalTree.from_tuples(v)
modPAM = params["PAM"].upper()
modPAM = modPAM.replace('N', '[ATCG]')
params["modPAM"] = modPAM
params["PAM_len"] = len(params["PAM"])
revcompl = lambda x: ''.join([{'A':'T','C':'G','G':'C','T':'A','N':'N'}[B] for B in x][::-1])
print "constructing refGene", time.time() - start_time
refGeneFilename = '../gtex/refGene.txt'
refGene = pd.read_csv(refGeneFilename, sep="\t")
refGene.columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames']
refGene["exonStarts"] = refGene.apply(lambda x: x['exonStarts'].split(',')[:-1], axis=1)
refGene["exonEnds"] = refGene.apply(lambda x: x['exonEnds'].split(',')[:-1], axis=1)
refGene["exonFrames"] = refGene.apply(lambda x: x['exonFrames'].split(',')[:-1], axis=1)
def gene_exon_coords(gene, exon):
try:
start = list(refGene.loc[refGene['name'] == gene]['exonStarts'])[0][exon]
end = list(refGene.loc[refGene['name'] == gene]['exonEnds'])[0][exon]
chrom = list(refGene.loc[refGene['name'] == gene]['chrom'])[0]
return {
'start': int(start),
'end': int(end),
'chrom': str(chrom)
}
except IndexError:
return None
def gene_exon_file(gene, exon):
filename = gene + "_" + str(exon)
seq_path = os.path.join('../GRCh37_exons/', filename)
if os.path.isfile(seq_path):
with open(seq_path) as infile:
return infile.read()
else:
return None
with open("/home/joshm/GUIDES/CRISPR-Library-Designer/static/data/pre_processed/exon_info.p", "rb") as f:
exon_info = pickle.load(f)
def get_exon_start_chrom(gene, exon):
# get the row from the exon_info dataframe
row = exon_info[exon_info['name'] == gene].iloc[0]
# find where the exon starts
start = row['exonStarts'][exon]
# find the chromosome this falls in
chrom = str(row['chrom'])
if chrom.isdigit():
chrom = str(int(chrom)) # get rid of decimal place
return start, chrom
# this is run on multiprocessing workflow
def run(genes_list):
for gene in genes_list:
exon = 0
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
while seq:
# Check if we haven't done this in a previous run of the program
outfile_name = gene["ensembl_id"] + "_" + str(exon) + ".p"
folder = '../cfdGRCh37_guides_msgpack_' + params["scoring"] + '/'
if params['functional_domains']:
folder = '../cfdGRCh37_guides_msgpack_' + params['scoring'] + '_domains/'
output_path = os.path.join(folder, outfile_name)
if os.path.isfile(output_path):
# prepare next exon
exon += 1
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
continue
q = PriorityQueue()
def process_guide(m, selected, max_queue_size, seq, domain):
if 'N' in seq:
return
PAM_start = m.start()
score = 0
if params["scoring"] == "Doench":
# Doench score requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = doench_score.calc_score(mer30)
elif params["scoring"] == "Azimuth":
# Azimuth requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = get_azimuth_score(mer30)
protospacer = ""
PAM = ""
if params["prime5"]:
protospacer = seq[PAM_start-params["protospacer_len"]:PAM_start]
protospacer_before = seq[PAM_start-params["protospacer_len"]-10:PAM_start-params["protospacer_len"]]
protospacer_after = seq[PAM_start:PAM_start+10]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
else:
protospacer = seq[PAM_start+params["PAM_len"]:PAM_start+params["PAM_len"]+params["protospacer_len"]]
protospacer_before = seq[PAM_start+params["PAM_len"]-10:PAM_start+params["PAM_len"]]
protospacer_after = seq[PAM_start+params["PAM_len"]+params["protospacer_len"]:PAM_start+params["PAM_len"]+params["protospacer_len"]+10]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
if protospacer not in exome_mers:
print protospacer, 'NOT in exome_mers', gene["ensembl_id"], exon
print 'PAM is', seq[PAM_start:PAM_start+params["PAM_len"]]
has_exome_repeat = hasExomeRepeat(protospacer)
off_target_score = get_off_target_score(protospacer)
potential_gRNA = GuideRNA(selected, PAM_start-params["protospacer_len"], protospacer, PAM, protospacer_before, protospacer_after, coords['chrom'], PAM_start-params["protospacer_len"], score, exon, gene["ensembl_id"], gene["name"], domain, has_exome_repeat, off_target_score) # chrom and cut_pos (approximated here by the protospacer start within the exon) are required by the GuideRNA constructor
# If there's enough room, add it, no question.
if q.qsize() < max_queue_size:
q.put(potential_gRNA)
# Otherwise, take higher score
else:
lowest_gRNA = q.get()
if cmp(potential_gRNA, lowest_gRNA) == 1: # if potential_gRNA > lowest_gRNA
q.put(potential_gRNA)
else:
q.put(lowest_gRNA)
for m in re.finditer(params["modPAM"], seq):
if params["prime5"] and (m.start() < params["protospacer_len"] or m.start() + params["PAM_len"] > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
# Functional domains currently only supported for Cas9.
# This needs to be modified for other genome editing proteins.
domain = None
if params["PAM"] == "NGG": # spCas9
cut_site = coords['start'] + m.start() - 3
chrom = 'chr' + coords['chrom']
if chrom in interval_trees_dict:
domain_matches = list(interval_trees_dict[chrom][cut_site])
if len(domain_matches) > 0:
domain = domain_matches[0].data
process_guide(m, True, params["quantity"], seq, domain)
seq_rc = revcompl(seq)
for m in re.finditer(params["modPAM"], seq_rc):
if params["prime5"] and (m.start() < params["protospacer_len"] or m.start() + params["PAM_len"] > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
# Functional domains currently only supported for Cas9.
# This needs to be modified for other genome editing proteins.
domain = None
if params["PAM"] == "NGG": #spCas9
cut_site = coords['end'] - m.start() + 3
chrom = 'chr' + coords['chrom']
if chrom in interval_trees_dict:
domain_matches = list(interval_trees_dict[chrom][cut_site])
if len(domain_matches) > 0:
domain = domain_matches[0].data
process_guide(m, True, params["quantity"], seq_rc, domain)
# Pop gRNAs into our 'permanent' storage
gRNAs = []
while not q.empty():
gRNA = q.get()
gRNAs.append(gRNA.serialize_for_display())
outfile_name = gene["ensembl_id"] + "_" + str(exon) + ".p"
folder = '../cfdGRCh37_guides_msgpack_' + params['scoring'] + '/'
if params['functional_domains']:
folder = '../cfdGRCh37_guides_msgpack_' + params['scoring'] + '_domains/'
output_path = os.path.join(folder, outfile_name)
with open(output_path, 'w') as outfile:
# Reverse gRNAs list.
# Want highest on-target first.
msgpack.dump(gRNAs[::-1], outfile)
# prepare next exon
exon += 1
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
NUM_CORES = 16
print "beginning gene by gene processing", time.time() - start_time
with open('genes_list.json') as genes_list_file:
full_genes_list = json.load(genes_list_file)
# gene format: {"ensembl_id": "ENSG00000261122.2", "name": "5S_rRNA", "description": ""}
processes = []
unit = len(full_genes_list) / NUM_CORES + 1
print 'unit is', unit, time.time() - start_time
for i in range(NUM_CORES):
start = unit * i
end = min(unit * (i + 1), len(full_genes_list))
genes_list = full_genes_list[start:end]
p = Process(target = run, args=(genes_list,))
processes.append(p)
for process in processes:
process.start()
for process in processes:
process.join()
with open('azimuth_scores.p', 'wb') as output:
pickle.dump(azimuth_scores, output)
end_time = time.time()
hours, rem = divmod(end_time-start_time, 3600)
minutes, seconds = divmod(rem, 60)
print "time elapsed"
print("{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
|
socketio.py
|
import json
import threading
from ws4py.client.threadedclient import WebSocketClient
from ws4py.exc import HandshakeError
from .config import Config
from enum import Enum
class SocketIO:
def __init__(this, config, on_connected = None, on_message = None,
on_closed = None, on_error = None):
if not isinstance(config, Config):
raise ValueError("config must be an object of Config class")
this._impl = SocketIO_Private(config, on_connected, on_message, on_closed, on_error)
def on_connected(this, on_connected):
this._impl.connected_callback = on_connected
def on_message(this, on_message):
this._impl.message_callback = on_message
def on_closed(this, on_closed):
this._impl.closed_callback = on_closed
def on_error(this, on_error):
this._impl.error_callback = on_error
def connect(this):
this._impl.connect()
def sid(this):
return this._impl.sid()
class SocketIO_Private:
class EIO:
OPEN = '0'
CLOSE = '1'
PING = '2'
PONG = '3'
MESSAGE = '4'
UPGRADE = '5'
NOOP = '6'
class SIO:
CONNECT = '0'
DISCONNECT = '1'
EVENT = '2'
ACK = '3'
ERROR = '4'
BINARY_EVENT = '5'
BINARY_ACK = '6'
def __init__(this, config, on_connected, on_message, on_closed, on_error):
this.config = config
this.connecting = False
if on_connected:
this.connected_callback = on_connected
if on_message:
this.message_callback = on_message
if on_closed:
this.closed_callback = on_closed
if on_error:
this.error_callback = on_error
def ws_opened(this):
pass
def ws_closed(this, code, reason=None):
this.pinger.close()
this.closed_callback(code, reason)
def ws_message(this, m):
m = str(m)
if(m):
if(m[0] == this.EIO.MESSAGE):
if(m[1] == this.SIO.EVENT):
try:
message = json.loads(m[2:])
this.message_callback(message[0],message[1])
except json.decoder.JSONDecodeError as e:
this.error_callback(4001,"Error decoding socket message: {0}".format(e))
elif(m[1] == this.SIO.CONNECT):
this.connected_callback()
elif(m[1] == this.SIO.ERROR):
this.error_callback(4009,m[2:])
elif(m[1] == this.SIO.DISCONNECT):
this.error_callback(4010,"SocketIO got disconnect packet.")
this.disconnect()
else:
this.error_callback(4008,"Got unknown SocketIO packet: {0}".format(m[1]))
elif(m[0] == this.EIO.PONG):
this.pinger.pong()
pass
elif(m[0] == this.EIO.OPEN):
try:
this.socketOpt = json.loads(m[1:])
this.start_ping()
except json.decoder.JSONDecodeError as e:
this.error_callback(4000,"Error decoding socket options: {0}".format(e))
else:
this.error_callback(4007,"Got unknown EngineIO packet: {0}".format(m[0]))
def ws_unhandled_error(this, error):
this.error_callback(4002,"Websocket library reports unhandled error: {0}".format(error))
def connected_callback(this):
pass
def message_callback(this, event, message=None):
pass
def closed_callback(this, code, reason=None):
pass
def error_callback(this, code, reason=None):
pass
def connect(this):
wsProtocol = 'ws' if this.config.protocol() == 'http' else 'wss'
wsURL = "{0}://{1}:{2}/socket.io/?EIO=3&transport=websocket&access_token={3}&agent={4}".format(
wsProtocol,this.config.host(),this.config.port(),this.config.token(),this.config.agent())
this.ws = SocketIO_Private.WebSocket(wsURL,this.ws_opened,this.ws_closed,
this.ws_message,this.ws_unhandled_error)
try:
this.ws.connect()
except HandshakeError as e:
this.error_callback(4003, "Websocket handshake error: {0}".format(e))
except ConnectionRefusedError as e:
this.error_callback(4004, "Websocket connection refused error")
except Exception as e:
this.error_callback(4005, "Websocket other exception error")
def disconnect(this):
# TODO: only if connected (state machine?)
this.pinger.close()
this.ws.close()
def sid(this):
return this.socketOpt.get('sid')
def start_ping(this):
this.pinger = SocketIO_Private.Pinger(this.socketOpt.get('pingInterval'),
this.socketOpt.get('pingTimeout'), this.ping, this.pongTimeout)
this.pinger.start()
def ping(this):
this.ws.send("2")
def pongTimeout(this):
this.error_callback(4006,"Server did not respond to ping. disconnecting")
this.disconnect()
class WebSocket(WebSocketClient):
def __init__(this, url, opened = None, closed = None,
received_message = None, unhandled_error = None):
super().__init__(url)
if opened:
this.opened = opened
if closed:
this.closed = closed
if received_message:
this.received_message = received_message
if unhandled_error:
this.unhandled_error = unhandled_error
@property
def handshake_headers(this):
headers = WebSocketClient.handshake_headers.fget(this)
headers = [
(header, this.host) if header == 'Host' else (header, value)
for (header, value) in headers
]
return headers
class OnceTimer:
def __init__(this, timeout, callback):
this.callback = callback
this.timeout = timeout
this.cancelEvent = threading.Event()
this.cancelEvent.clear()
this.thread = threading.Thread(target=this.wait, daemon=True)
this.thread.start()
def __enter__(this):
return this
def __exit__(this, *err):
this.cancel()
def wait(this):
if not this.cancelEvent.wait(this.timeout):
this.callback()
def cancel(this):
this.cancelEvent.set()
this.thread.join()
this.thread = None
class RepeatTimer:
def __init__(this,interval,callback):
this.run = True
this.startEvent = threading.Event()
this.stopEvent = threading.Event()
this.interval = interval
this.callback = callback
this.thread = None
def __enter__(this):
return this
def __exit__(this, *err):
this.close()
def start(this):
this.stopEvent.clear()
this.startEvent.set()
if this.thread is None:
this.run = True
this.thread = threading.Thread(target=this.loop, daemon=True)
this.thread.start()
def stop(this):
this.startEvent.clear()
this.stopEvent.set()
def close(this):
this.run = False
this.stopEvent.set()
this.startEvent.set()
this.thread.join()
this.thread = None
def loop(this):
while this.run:
this.startEvent.wait()
if not this.stopEvent.wait(this.interval):
this.callback()
class Pinger:
def __init__(this,interval,timeout,pingCallback,timeoutCallback):
this.run = True
this.startEvent = threading.Event()
this.pingEvent = threading.Event()
this.pongEvent = threading.Event()
this.interval = interval / 1000
this.timeout = timeout / 1000
this.pingCallback = pingCallback
this.timeoutCallback = timeoutCallback
this.thread = None
def __enter__(this):
return this
def __exit__(this, *err):
this.close()
def start(this):
if this.thread is None:
this.pongEvent.clear()
this.pingEvent.clear()
this.startEvent.set()
this.run = True
this.thread = threading.Thread(target=this.loop, daemon=True, name="SocketIOPinger")
this.thread.start()
def stop(this):
this.startEvent.clear()
def close(this):
this.run = False
this.startEvent.set()
this.pingEvent.set()
this.pongEvent.set()
def loop(this):
while this.run:
this.startEvent.wait()
if not this.pingEvent.wait(this.interval):
this.pingCallback()
if not this.pongEvent.wait(this.timeout):
this.timeoutCallback()
this.run = False
else:
this.pongEvent.clear()
def pong(this):
this.pongEvent.set()
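# Hedged usage sketch (not part of this module). The Config class comes from
# .config and its constructor is not shown here, so treat this as pseudocode for
# how the callbacks are wired:
#   cfg = Config(...)                                   # construction assumed
#   sio = SocketIO(cfg,
#                  on_connected=lambda: print("connected, sid =", sio.sid()),
#                  on_message=lambda event, data: print(event, data),
#                  on_closed=lambda code, reason=None: print("closed", code, reason),
#                  on_error=lambda code, reason=None: print("error", code, reason))
#   sio.connect()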
|
main.py
|
import cv2
import config
import controller_listener
import controls
import logging
import start_web
import time
import _thread as thread
import tensorflow as tf
import ujson as json
import websocket as ws
from cameras.camera import USBCam, Camera
from cameras import image_converter
from cameras.video_async import VideoCaptureAsync
from controls import main_controller
from controls import CAMERA_MODE_RAW, CAMERA_MODE_LOADING_BAY, CAMERA_MODE_BALL, CAMERA_MODE_HEXAGON
from multiprocessing import Process
from processing import colors
from processing import bay_tracker
from processing import port_tracker
from processing import ball_tracker2
from processing import color_calibrate
from processing import cvfilters
from processing import filters
from processing import ml
from profiles.color_profile import ColorProfile
from websocket import create_connection
# initiate the top level logger
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(name)s] [%(levelname)-5.5s] %(message)s",
handlers=[
logging.StreamHandler()
]
)
logger = logging.getLogger('app')
# module-level logger for the application
def main():
# networktables.init(client=False)
# dashboard = networktables.get()
# dashboard.putBoolean(networktables.keys.vision_initialized, True)
cv2.destroyAllWindows()
# cap = cv2.VideoCapture(config.video_source_number)
# cap set to a cv2 object with input from a preset source
if main_controller.enable_read_image:
main_controller.enable_dual_camera = False
camera = Camera(1024, 768, 30)
else:
wideCam = USBCam()
wideCam.open(config.wide_video_source_number)
wideVideo = VideoCaptureAsync(wideCam)
wideVideo.startReading()
if main_controller.enable_dual_camera:
farCam = USBCam()
farCam.open(config.far_video_source_number)
farVideo = VideoCaptureAsync(farCam)
farVideo.startReading()
cap = wideCam.getCam()
# Set video properties
camera = Camera(cap.get(cv2.CAP_PROP_FRAME_WIDTH),
cap.get(cv2.CAP_PROP_FRAME_HEIGHT),
cap.get(cv2.CAP_PROP_FPS))
color_profile_map = {}
for profile in [controls.CAMERA_MODE_RAW,
controls.CAMERA_MODE_BALL,
controls.CAMERA_MODE_HEXAGON,
controls.CAMERA_MODE_LOADING_BAY]:
color_profile_map[profile] = ColorProfile(profile)
main_controller.color_profiles = color_profile_map
time.sleep(5)
# camera_ws = create_connection("ws://localhost:5805/camera/ws")
wide_camera_ws = create_connection("ws://localhost:5805/wide_camera/ws")
far_camera_ws = create_connection("ws://localhost:5805/far_camera/ws")
processed_ws = create_connection("ws://localhost:5805/processed/ws")
calibration_ws = create_connection("ws://localhost:5805/calibration/ws")
tracking_ws = create_connection("ws://localhost:5805/tracking/ws")
controller_listener.start("ws://localhost:5805/dashboard/ws")
# Load the TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path='model.tflite')
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
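# Hedged sketch of how ml.predict presumably drives the interpreter (the real
# implementation lives in processing/ml.py, which is not shown here):
#   interpreter.set_tensor(input_details[0]['index'], input_frame)
#   interpreter.invoke()
#   detections = interpreter.get_tensor(output_details[0]['index'])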
logger.info('starting main loop ')
frame_cnt = 0
while (True):
tracking_data = []
ml_data = []
frame_cnt += 1
print(frame_cnt)
frame_cnt_str = str(frame_cnt)
frame_cnt_str = frame_cnt_str.zfill(4)
print(frame_cnt_str + '.jpg')
if main_controller.enable_camera:
if not main_controller.enable_read_image and not cap.isOpened():
print('opening camera')
if main_controller.enable_dual_camera:
farCam.open(config.far_video_source_number)
wideCam.open(config.wide_video_source_number)
# if the cap is not already open, do so
if main_controller.enable_read_image:
wide_bgr_frame = cv2.imread(frame_cnt_str + '.jpg')
else:
_, wide_bgr_frame = wideVideo.read()
wide_resized_frame = cvfilters.resize(wide_bgr_frame, 640, 480)
wide_rgb_frame = cv2.cvtColor(wide_resized_frame, cv2.COLOR_BGR2RGB)
if main_controller.enable_dual_camera:
_, far_bgr_frame = farVideo.read()
far_resized_frame = cvfilters.resize(far_bgr_frame, 640, 480)
far_rgb_frame = cv2.cvtColor(far_resized_frame, cv2.COLOR_BGR2RGB)
else:
far_rgb_frame = wide_rgb_frame
if main_controller.enable_camera_feed:
wide_jpg = image_converter.convert_to_jpg(wide_rgb_frame)
wide_camera_ws.send_binary(wide_jpg)
if main_controller.enable_dual_camera:
far_jpg = image_converter.convert_to_jpg(far_rgb_frame)
far_camera_ws.send_binary(far_jpg)
# camera_ws.send_binary(jpg)
# take rgb frame and convert it to a displayable jpg form, then send that as binary through websocket
if main_controller.enable_calibration_feed:
if main_controller.camera_mode == CAMERA_MODE_HEXAGON:
calibration_frame = far_rgb_frame.copy()
else:
calibration_frame = wide_rgb_frame.copy()
calibration_frame = color_calibrate.process(calibration_frame,
camera_mode=main_controller.calibration.get('camera_mode',
'RAW'),
color_mode=main_controller.calibration.get('color_mode'),
apply_mask=main_controller.calibration.get('apply_mask', False))
jpg = image_converter.convert_to_jpg(calibration_frame)
calibration_ws.send_binary(jpg)
if main_controller.camera_mode == CAMERA_MODE_RAW:
processed_frame = wide_rgb_frame
# Camera mode set to "raw" - takes rgb frame
elif main_controller.camera_mode == CAMERA_MODE_LOADING_BAY:
color_profile = main_controller.color_profiles[CAMERA_MODE_LOADING_BAY]
# Set color profile to that of "camera mode loading bay"
ml_data = ml.predict(wide_rgb_frame, interpreter, input_details, output_details)
tracking_data = bay_tracker.process(wide_rgb_frame,
camera,
frame_cnt,
color_profile)
# Frame is displayed with bay tracking properties
elif main_controller.camera_mode == CAMERA_MODE_BALL:
color_profile = main_controller.color_profiles[
CAMERA_MODE_BALL] # color profile set to the CAMERA MODE BALL one
# print("ball")
ml_data = ml.predict(wide_rgb_frame, interpreter, input_details, output_details)
tracking_data = ball_tracker2.process(wide_rgb_frame,
camera,
frame_cnt,
color_profile)
processed_frame, tracking_data = ball_tracker2.combine(wide_rgb_frame,
tracking_data,
ml_data,
15)
elif main_controller.camera_mode == CAMERA_MODE_HEXAGON:
color_profile_hex = main_controller.color_profiles[CAMERA_MODE_HEXAGON]
ml_data = ml.predict(far_rgb_frame, interpreter, input_details, output_details)
tracking_data_port = port_tracker.process(far_rgb_frame,
camera,
frame_cnt,
color_profile_hex)
processed_frame, tracking_data = port_tracker.combine(far_rgb_frame,
tracking_data_port,
ml_data,
15)
if main_controller.enable_processing_feed: # once we start showing our processing feed...
cv2.putText(processed_frame,
'Tracking Mode %s' % main_controller.camera_mode,
(10, 10),
cv2.FONT_HERSHEY_DUPLEX,
.4,
colors.BLUE,
1,
cv2.LINE_AA)
jpg = image_converter.convert_to_jpg(processed_frame)
processed_ws.send_binary(jpg)
# if out is not None:
# out.write(frame)
if len(tracking_data) != 0 and main_controller.send_tracking_data:
# sort tracking data by closest object
# logger.info(tracking_data)
tracking_data = sorted(tracking_data, key=lambda i: i['dist'])
tracking_ws.send(json.dumps(dict(targets=tracking_data)))
# cv2.imshow('frame', processed_frame )
# cv2.waitKey(0)
else:
logger.info('waiting for control socket')
# IDLE mode
if not main_controller.enable_read_image and cap.isOpened():
print('closing camera')
wideCam.stop()
if main_controller.enable_dual_camera:
farCam.stop()
time.sleep(.3)
cv2.waitKey(0)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
if __name__ == '__main__':
p = Process(target=start_web.main)
p.start()
main()
p.join()
|
Vector_Addition.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 17 16:57:39 2017
@author: Usman
"""
import threading
import random,time,math,numpy as np
from matplotlib import pyplot as plt
def SeqAdd(vectorA,vectorB,vectorC):
for r in range(0, len(vectorA)):
vectorC[r]=int(vectorA[r])+int(vectorB[r]) #Adding Vector A and B to C sequentially
#print( vectorC)
#def Minimum(minimum,maximum,lock,i,step,vec_size,vectorA,vectorB,vectorC):
def AddVectors(minimum,maximum,lock,i,step,vec_size,vectorA,vectorB,vectorC): #Vector process definition
lock.acquire() #Acquiring Lock
try:
print (threading.current_thread()) #printing currentthread
threadname=threading.current_thread()
#print (threadname.split( ))
for i in range(minimum,maximum,1): #For loop with (Distributed Data)
vectorC[i]=int(vectorA[i])+int(vectorB[i]) #Adding Vector B with Vector A and saving in Vector C
finally:
# print("Vector A : ",vectorA) #Printing Value
# print("Vector B : ",vectorB) #Printing Value
# print("Vector C : ",vectorC) #Printing Value
lock.release() #Releasing Lock
return
def main(): #main function
print("")
minimum=0 #Min/Max Variables
maximum=0
vec_size=100000 #Vector size (10**5 elements)
vectorA=[None]*vec_size
vectorB=[None]*vec_size
vectorC=[None]*vec_size
Sum_Time=0
lock=threading.Lock() #making a new lock object
nThreads=input("Please input number of threads you want to initialize: ")
threads=[]
step=int(vec_size)//int(nThreads) #Calculating the chunk size each thread will process
final_chunk=int(step)*int(nThreads)
print("Size of Vector: ",vec_size)
print("Number of threads: ",nThreads)
print("Size of 1 chunk: ",int(step))
#print("Final Chunk: ",final_chunk)##Outer loop will befor the threads
for i in range(0,len(vectorA),1):
vectorA[i]=1
vectorB[i]=1
vectorC[i]=1
for i in range(0,vec_size,int(step)):
if(i+int(step)<=vec_size and maximum!=vec_size): ## so that it stays within limits
minimum=i
maximum=int(i+int(step))
if(i+int(step)==final_chunk):
#print("final chunk reached")
maximum=maximum+(vec_size-final_chunk)
# print("Min: ",minimum)
# print("Max: ",maximum)
# AddVectors(int(minimum),int(maximum),lock,i,step)
#########Calling Addition Functions on Thread
time.sleep(0.5)
start = time.perf_counter()
t=threading.Thread(target=AddVectors,args=(int(minimum),int(maximum),lock,i,step, vec_size,vectorA,vectorB,vectorC))
t.start()
threads.append(t)
#Wait for all worker threads to finish before timing and comparing with sequential addition
for x in range(len(threads)):
threads[x].join()
end = time.perf_counter()
#print("Vector A: ",vectorA)
#print("Vector B: ",vectorB)
#print("Vector C: ",vectorC)
print ("Processing Time for Paralell Addition: ",round(end - start,4))
startSeq = time.clock()
SeqAdd(vectorA,vectorB,vectorC)
endSeq = time.clock()
print("Process Time for Sequential Addition: ",round(endSeq-startSeq,4)) #Printing Parallell Time
print("Sequential Time - Paralell Time :",round((endSeq-startSeq)-(end-start),4)) #Printing Sequential Time
if((endSeq-startSeq)>(end-start)):
print("Parallel Mechanism was",round(100*((endSeq-startSeq)-(end-start))/(end-start),4),"% Faster")
if((endSeq-startSeq)<(end-start)):
print("Sequential Mechanism was",round(100*((end-start)-(endSeq-startSeq))/(endSeq-startSeq),4),"% Faster")
if((endSeq-startSeq)==(end-start)):
print("Sequential and Parallel took the same time")
x_axis=["Seq Mech Time","Par Mech Time"]
y_axis=[round((endSeq-startSeq),4),round((end-start),4)]
ind=np.arange(len(x_axis))
print("Graph shows the times for Paralell and Sequential Mechanisms")
plt.bar(ind,y_axis)
plt.xticks(ind,x_axis)
if __name__ == "__main__":
main()
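# --- Illustrative sketch (not part of the original script) --------------------
# The same chunked addition without holding one global lock around every chunk,
# so worker threads can at least overlap their loops. For pure-Python arithmetic
# the GIL still limits real speed-up; this only demonstrates the chunking pattern.
def add_chunk(lo, hi, a, b, c):
    for i in range(lo, hi):
        c[i] = a[i] + b[i]

def parallel_add(a, b, n_threads=4):
    c = [0]*len(a)
    step = (len(a) + n_threads - 1)//n_threads
    workers = []
    for t in range(n_threads):
        lo, hi = t*step, min((t+1)*step, len(a))
        w = threading.Thread(target=add_chunk, args=(lo, hi, a, b, c))
        w.start()
        workers.append(w)
    for w in workers:
        w.join()
    return c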
|
camera.py
|
#This is not yet used and tested in my project
#/max
#https://github.com/Misterblue/ros2_raspicam_node/blob/master/src/ros2/ros2_raspicam_node/ros2_raspicam_node/ros2_raspicam_node.py
# Copyright 2018 Robert Adams
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import queue
import threading
import time
import sys
import rclpy
from rclpy.parameter import Parameter
from rclpy.node import Node
from sensor_msgs.msg import CompressedImage, Image  # Image is needed for the uncompressed publisher below
import picamera
class ROS2_raspicam_node(Node):
def __init__(self):
super().__init__('ros2_raspicam_node', namespace='raspicam')
self.set_parameter_defaults( [
('compressed_image', Parameter.Type.BOOL, True),
('image_topic', Parameter.Type.STRING, 'raspicam_uncompressed'),
('compressed_image_topic', Parameter.Type.STRING, 'raspicam_compressed'),
# off, auto, sunlight, cloudy, shade, tungsten, fluorescent, incandescent, flash, horizon
('camera_awb_mode', Parameter.Type.STRING, 'auto'),
# ('camera_annotate_background', Parameter.Type.STRING, 'black'),
# ('camera_annotate_foreground', Parameter.Type.STRING, 'yellow'),
# ('camera_annotate_text', Parameter.Type.STRING, 'fun image'),
# text size: 6..160, default 32
# ('camera_annotate_text_size', Parameter.Type.INTEGER, 10),
# brightness: 1..100, default 50
('camera_brightness', Parameter.Type.INTEGER, 55),
# Contrast: -100..100, default 0
('camera_contrast', Parameter.Type.INTEGER, 0),
# ('camera_exif_copyright', Parameter.Type.STRING, 'Copyright 2018 MY NAME'),
# ('camera_user_comment', Parameter.Type.STRING, 'SOMETHING INFORMATIVE'),
# Exposure compensation: -25..25, default 0, one step = 1/6 F-stop
('camera_exposure_compenstation', Parameter.Type.INTEGER, 0),
# off, auto, night, backlight, spotlight, sports, snow, beach, antishake, fireworks
('camera_exposure_mode', Parameter.Type.STRING, 'auto'),
# the camera is upside down in initial setup
('camera_hflip', Parameter.Type.BOOL, True),
('camera_vflip', Parameter.Type.BOOL, True),
# 'none', 'negative', 'solarize', 'sketch', 'denoise', 'emboss', 'oilpaint',
# 'hatch', 'gpen', 'pastel', 'watercolor', 'film', 'blur', 'saturation',
# 'colorswap', 'washedout', 'posterise', 'colorpoint', 'colorbalance', 'cartoon', 'deinterlace1',
# 'deinterlace2'
('camera_image_effect', Parameter.Type.STRING, 'none'),
# 'average' 'spot' 'backlit' 'matrix'
('camera_meter_mode', Parameter.Type.STRING, 'average'),
# 640/480, 800/600, 1280/720
('camera_image_width', Parameter.Type.INTEGER, 640),
('camera_image_height', Parameter.Type.INTEGER, 480),
# Saturation: -100..100, default 0
('camera_saturation', Parameter.Type.INTEGER, 0),
# Sharpness: -100..100, default 0
('camera_sharpness', Parameter.Type.INTEGER, 10),
] )
self.camera = picamera.PiCamera()
time.sleep(1); # let camera initialization complete
self.initialize_publisher()
self.set_camera_parameters()
self.initialize_capture_queue()
def destroy_node(self):
# overrides Node.destroy_node; called when the node is being stopped and the camera needs closing
# if hasattr(self, 'publisher') and self.publisher != None:
# # nothing to do
if hasattr(self, 'camera') and self.camera != None:
self.camera.close()
super().destroy_node()
def initialize_publisher(self):
if self.get_parameter_value('compressed_image'):
self.publisher = self.create_publisher(CompressedImage,
self.get_parameter_value('compressed_image_topic'))
else:
self.publisher = self.create_publisher(Image,
self.get_parameter_value('image_topic'))
self.frame_num = 0
def set_camera_parameters(self):
# https://picamera.readthedocs.io/en/release-1.13/api_camera.html
self.camera.awb_mode = self.get_parameter_value('camera_awb_mode')
self.parameter_set_if_set('camera_annotate_background',
lambda xx: setattr(self.camera, 'annotate_background', xx))
self.parameter_set_if_set('camera_annotate_foreground',
lambda xx: setattr(self.camera, 'annotate_foreground', xx))
self.parameter_set_if_set('camera_annotate_text',
lambda xx: setattr(self.camera, 'annotate_text', xx))
self.parameter_set_if_set('camera_annotate_text_size',
lambda xx: setattr(self.camera, 'annotate_text_size', xx))
self.camera.brightness = self.get_parameter_value('camera_brightness')
self.camera.contrast = self.get_parameter_value('camera_contrast')
if self.has_parameter('camera_exif_copyright'):
self.camera.exif_tags['IFD0.Copyright'] = self.get_parameter_value('camera_exif_copyright')
if self.has_parameter('camera_exif_user_comment'):
self.camera.exif_tags['EXIF.UserComment'] = self.get_parameter_value('camera_exif_user_comment')
self.camera.exposure_compensation = self.get_parameter_value('camera_exposure_compenstation')
self.camera.exposure_mode = self.get_parameter_value('camera_exposure_mode')
self.camera.hflip = self.get_parameter_value('camera_hflip')
self.camera.vflip = self.get_parameter_value('camera_vflip')
self.camera.image_effect = self.get_parameter_value('camera_image_effect')
self.camera.meter_mode = self.get_parameter_value('camera_meter_mode')
self.image_width = self.get_parameter_value('camera_image_width')
self.image_height = self.get_parameter_value('camera_image_height')
self.camera.resolution = ( self.image_width, self.image_height )
self.get_logger().debug('CAM: setting capture resolution = %s/%s'
% (self.camera.resolution[0], self.camera.resolution[1]))
self.camera.saturation = self.get_parameter_value('camera_saturation')
self.camera.sharpness = self.get_parameter_value('camera_sharpness')
def initialize_capture_queue(self):
# Create a queue and two threads to capture and then push the images to the topic
self.queue_lock = threading.Lock()
self.capture_queue = queue.Queue()
# self.capture_queue = queue.SimpleQueue() # introduced in Python 3.7
# thread to capture camera images and place in queue
self.capture_event = threading.Event()
self.capturer_thread = threading.Thread(target=self.take_pictures, name='capturer')
# thread to read queue and send them to the topic
self.publisher_event = threading.Event()
self.publisher_thread = threading.Thread(target=self.publish_images, name='publisher')
self.capturer_thread.start()
self.publisher_thread.start()
def stop_workers(self):
# if workers are initialized and running, tell them to stop and wait until stopped
if hasattr(self, 'capture_event') and self.capture_event != None:
self.capture_event.set()
if hasattr(self, 'publisher_event') and self.publisher_event != None:
self.publisher_event.set()
if hasattr(self, 'publisher_thread') and self.publisher_thread.is_alive():
self.publisher_thread.join()
if hasattr(self, 'capturer_thread') and self.capturer_thread.is_alive():
self.capturer_thread.join()
def take_pictures(self):
# Take compressed images and put into the queue.
# 'jpeg', 'rgb'
try:
for capture in self.camera.capture_continuous(self.write_capture(self), format='jpeg'):
if self.capture_event.is_set():
break
time.sleep(0.5)
# The exit flag could have been set while in the sleep
if self.capture_event.is_set():
break
except:
self.get_logger().error('CAM: exiting take_pictures because of exception')
class write_capture():
# Writer object that writes the passed data to the queue
def __init__(self, pparent):
self.parent = pparent
def write(self, d):
if not self.parent.capture_event.is_set():
with self.parent.queue_lock:
msg = CompressedImage()
msg.data = d
msg.format = 'jpeg'
msg.header.frame_id = str(self.parent.frame_num)
self.parent.frame_num += 1
self.parent.get_logger().debug('CAM: capture frame. size=%s, frame=%s'
% (len(d), msg.header.frame_id) )
# msg.header.stamp = time.Time
self.parent.capture_queue.put(msg)
def flush(self):
return
def publish_images(self):
# Loop reading from capture queue and send to ROS topic
while True:
if self.publisher_event.is_set():
break
try:
msg = self.capture_queue.get(block=True, timeout=2)
except queue.Empty:
msg = None
if self.publisher_event.is_set():
break
if msg != None:
self.get_logger().debug('CAM: sending frame. frame=%s'
% (msg.header.frame_id) )
self.publisher.publish(msg)
def get_parameter_or(self, param, default):
# Helper function to return value of a parameter or a default if not set
ret = None
param_desc = self.get_parameter(param)
if param_desc.type_== Parameter.Type.NOT_SET:
ret = default
else:
ret = param_desc.value
return ret
def get_parameter_value(self, param):
# Helper function to return value of a parameter
ret = None
param_desc = self.get_parameter(param)
if param_desc.type_== Parameter.Type.NOT_SET:
raise Exception('Fetch of parameter that does not exist: ' + param)
else:
ret = param_desc.value
return ret
def set_parameter_defaults(self, params):
# If a parameter has not been set externally, set the value to a default.
# Passed a list of "(parameterName, parameterType, defaultValue)" tuples.
parameters_to_set = []
for (pparam, ptype, pdefault) in params:
if not self.has_parameter(pparam):
parameters_to_set.append( Parameter(pparam, ptype, pdefault) )
if len(parameters_to_set) > 0:
self.set_parameters(parameters_to_set)
def parameter_set_if_set(self, param, set_function):
# If there is a parameter set, do set_function with the value
if self.has_parameter(param):
set_function(self.get_parameter_value(param))
def has_parameter(self, param):
# Return 'True' if a parameter by that name is specified
param_desc = self.get_parameter(param)
if param_desc.type_== Parameter.Type.NOT_SET:
return False
return True
def main(args=None):
rclpy.init(args=args)
camNode = ROS2_raspicam_node()
try:
rclpy.spin(camNode)
except KeyboardInterrupt:
camNode.get_logger().info('CAM: Keyboard interrupt')
camNode.stop_workers()
camNode.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
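# --- Small self-contained sketch (not from the original node) -----------------
# The same capture/publish pattern used in initialize_capture_queue() above: one
# thread produces items into a queue.Queue, another consumes them until a stop
# event is set. Uses the queue/threading/time imports at the top of this file.
def demo_producer_consumer(n_items=5):
    q = queue.Queue()
    stop = threading.Event()

    def produce():
        for i in range(n_items):
            q.put('frame-%d' % i)       # stands in for a captured image
            time.sleep(0.1)
        stop.set()

    def consume():
        while not (stop.is_set() and q.empty()):
            try:
                item = q.get(block=True, timeout=0.5)
            except queue.Empty:
                continue
            print('publishing', item)   # stands in for publisher.publish(msg)

    t1 = threading.Thread(target=produce)
    t2 = threading.Thread(target=consume)
    t1.start(); t2.start()
    t1.join(); t2.join()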
|
__init__.py
|
import os
import inspect
import functools
import threading
from timeit import default_timer
from flask import request, make_response, current_app
from flask import Flask, Response
from werkzeug.serving import is_running_from_reloader
from werkzeug.exceptions import HTTPException
from prometheus_client import Counter, Histogram, Gauge, Summary
from prometheus_client import generate_latest, CONTENT_TYPE_LATEST
from prometheus_client import REGISTRY as DEFAULT_REGISTRY
from prometheus_client import multiprocess, CollectorRegistry
class PrometheusMetrics(object):
"""
Prometheus metrics export configuration for Flask.
The default metrics include a Histogram for HTTP request latencies
and number of HTTP requests plus a Counter for the total number
of HTTP requests.
Sample usage:
app = Flask(__name__)
metrics = PrometheusMetrics(app)
# static information as metric
metrics.info('app_info', 'Application info', version='1.0.3')
@app.route('/')
def main():
pass # requests tracked by default
@app.route('/skip')
@metrics.do_not_track()
def skip():
pass # default metrics are not collected
@app.route('/<item_type>')
@metrics.do_not_track()
@metrics.counter('invocation_by_type', 'Number of invocations by type',
labels={'item_type': lambda: request.view_args['item_type']})
def by_type(item_type):
pass # only the counter is collected, not the default metrics
@app.route('/long-running')
@metrics.gauge('in_progress', 'Long running requests in progress')
def long_running():
pass
@app.route('/status/<int:status>')
@metrics.do_not_track()
@metrics.summary('requests_by_status', 'Request latencies by status',
labels={'status': lambda r: r.status_code})
@metrics.histogram('requests_by_status_and_path', 'Request latencies by status and path',
labels={'status': lambda r: r.status_code, 'path': lambda: request.path})
def echo_status(status):
return 'Status: %s' % status, status
Label values can be defined as callables:
- With a single argument that will be the Flask Response object
- Without an argument, possibly to use with the Flask `request` object
"""
def __init__(self, app, path='/metrics',
export_defaults=True, defaults_prefix='flask',
group_by_endpoint=False, buckets=None,
registry=DEFAULT_REGISTRY):
"""
Create a new Prometheus metrics export configuration.
:param app: the Flask application
:param path: the metrics path (defaults to `/metrics`)
:param export_defaults: expose all HTTP request latencies
and number of HTTP requests
:param defaults_prefix: string to prefix the default exported
metrics name with (when either `export_defaults=True` or
`export_defaults(..)` is called)
:param group_by_endpoint: group default HTTP metrics
by the endpoints' function name instead of the URI path
:param buckets: the time buckets for request latencies
(will use the default when `None`)
:param registry: the Prometheus Registry to use
"""
self.app = app
self.path = path
self._export_defaults = export_defaults
self._defaults_prefix = defaults_prefix or 'flask'
self.group_by_endpoint = group_by_endpoint
self.buckets = buckets
self.registry = registry
self.version = __version__
if app is not None:
self.init_app(app)
def init_app(self, app):
"""
This callback can be used to initialize an application for the
use with this prometheus reporter setup.
This is usually used with a flask "app factory" configuration. Please
see: http://flask.pocoo.org/docs/1.0/patterns/appfactories/
Note, that you need to use `PrometheusMetrics(app=None, ...)`
for this mode, otherwise it is called automatically.
:param app: the Flask application
"""
if self.path:
self.register_endpoint(self.path, app)
if self._export_defaults:
self.export_defaults(
self.buckets, self.group_by_endpoint,
self._defaults_prefix, app
)
def register_endpoint(self, path, app=None):
"""
Register the metrics endpoint on the Flask application.
:param path: the path of the endpoint
:param app: the Flask application to register the endpoint on
(by default it is the application registered with this class)
"""
if is_running_from_reloader():
return
if app is None:
app = self.app or current_app
@app.route(path)
@self.do_not_track()
def prometheus_metrics():
if 'prometheus_multiproc_dir' in os.environ:
registry = CollectorRegistry()
else:
registry = self.registry
if 'name[]' in request.args:
registry = registry.restricted_registry(request.args.getlist('name[]'))
if 'prometheus_multiproc_dir' in os.environ:
multiprocess.MultiProcessCollector(registry)
headers = {'Content-Type': CONTENT_TYPE_LATEST}
return generate_latest(registry), 200, headers
def start_http_server(self, port, host='0.0.0.0', endpoint='/metrics'):
"""
Start an HTTP server for exposing the metrics.
This will be an individual Flask application,
not the one registered with this class.
:param port: the HTTP port to expose the metrics endpoint on
:param host: the HTTP host to listen on (default: `0.0.0.0`)
:param endpoint: the URL path to expose the endpoint on
(default: `/metrics`)
"""
if is_running_from_reloader():
return
app = Flask('prometheus-flask-exporter-%d' % port)
self.register_endpoint(endpoint, app)
def run_app():
app.run(host=host, port=port)
thread = threading.Thread(target=run_app)
thread.setDaemon(True)
thread.start()
def export_defaults(self, buckets=None, group_by_endpoint=False,
prefix='flask', app=None):
"""
Export the default metrics:
- HTTP request latencies
- Number of HTTP requests
:param buckets: the time buckets for request latencies
(will use the default when `None`)
:param group_by_endpoint: group default HTTP metrics
by the endpoints' function name instead of the URI path
:param prefix: prefix to start the default metrics names with
:param app: the Flask application
"""
if app is None:
app = self.app or current_app
if not prefix:
prefix = self._defaults_prefix or 'flask'
# use the default buckets from prometheus_client if not given here
buckets_as_kwargs = {}
if buckets is not None:
buckets_as_kwargs['buckets'] = buckets
duration_group = 'endpoint' if group_by_endpoint else 'path'
histogram = Histogram(
'%s_http_request_duration_seconds' % prefix,
'Flask HTTP request duration in seconds',
('method', duration_group, 'status'),
registry=self.registry,
**buckets_as_kwargs
)
counter = Counter(
'%s_http_request_total' % prefix,
'Total number of HTTP requests',
('method', 'status'),
registry=self.registry
)
self.info(
'%s_exporter_info' % prefix,
'Information about the Prometheus Flask exporter',
version=self.version
)
def before_request():
request.prom_start_time = default_timer()
def after_request(response):
if hasattr(request, 'prom_do_not_track'):
return response
total_time = max(default_timer() - request.prom_start_time, 0)
histogram.labels(
request.method,
getattr(request, duration_group),
response.status_code
).observe(total_time)
counter.labels(request.method, response.status_code).inc()
return response
app.before_request(before_request)
app.after_request(after_request)
def histogram(self, name, description, labels=None, **kwargs):
"""
Use a Histogram to track the execution time and invocation count
of the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Histogram
"""
return self._track(
Histogram,
lambda metric, time: metric.observe(time),
kwargs, name, description, labels,
registry=self.registry
)
def summary(self, name, description, labels=None, **kwargs):
"""
Use a Summary to track the execution time and invocation count
of the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Summary
"""
return self._track(
Summary,
lambda metric, time: metric.observe(time),
kwargs, name, description, labels,
registry=self.registry
)
def gauge(self, name, description, labels=None, **kwargs):
"""
Use a Gauge to track the number of invocations in progress
for the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Gauge
"""
return self._track(
Gauge,
lambda metric, time: metric.dec(),
kwargs, name, description, labels,
registry=self.registry,
before=lambda metric: metric.inc()
)
def counter(self, name, description, labels=None, **kwargs):
"""
Use a Counter to track the total number of invocations of the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Counter
"""
return self._track(
Counter,
lambda metric, time: metric.inc(),
kwargs, name, description, labels,
registry=self.registry
)
@staticmethod
def _track(metric_type, metric_call, metric_kwargs, name, description, labels,
registry, before=None):
"""
Internal method decorator logic.
:param metric_type: the type of the metric from the `prometheus_client` library
:param metric_call: the invocation to execute as a callable with `(metric, time)`
:param metric_kwargs: additional keyword arguments for creating the metric
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param before: an optional callable to invoke before executing the
request handler method accepting the single `metric` argument
:param registry: the Prometheus Registry to use
"""
if labels is not None and not isinstance(labels, dict):
raise TypeError('labels needs to be a dictionary of {labelname: callable}')
label_names = labels.keys() if labels else tuple()
parent_metric = metric_type(
name, description, labelnames=label_names, registry=registry,
**metric_kwargs
)
def label_value(f):
if not callable(f):
return lambda x: f
if inspect.getargspec(f).args:
return lambda x: f(x)
else:
return lambda x: f()
label_generator = tuple(
(key, label_value(call))
for key, call in labels.items()
) if labels else tuple()
def get_metric(response):
if label_names:
return parent_metric.labels(
**{key: call(response) for key, call in label_generator}
)
else:
return parent_metric
def decorator(f):
@functools.wraps(f)
def func(*args, **kwargs):
if before:
metric = get_metric(None)
before(metric)
else:
metric = None
start_time = default_timer()
try:
response = f(*args, **kwargs)
except HTTPException as ex:
response = ex
except Exception as ex:
response = make_response('Exception: %s' % ex, 500)
total_time = max(default_timer() - start_time, 0)
if not metric:
response_for_metric = response
if not isinstance(response, Response):
if request.endpoint == f.__name__:
# we are in a request handler method
response_for_metric = make_response(response)
metric = get_metric(response_for_metric)
metric_call(metric, time=total_time)
return response
return func
return decorator
@staticmethod
def do_not_track():
"""
Decorator to skip the default metrics collection for the method.
*Note*: explicit metrics decorators will still collect the data
"""
def decorator(f):
@functools.wraps(f)
def func(*args, **kwargs):
request.prom_do_not_track = True
return f(*args, **kwargs)
return func
return decorator
def info(self, name, description, labelnames=None, labelvalues=None, **labels):
"""
Report any information as a Prometheus metric.
This will create a `Gauge` with the initial value of 1.
The easiest way to use it is:
metrics = PrometheusMetrics(app)
metrics.info(
'app_info', 'Application info',
version='1.0', major=1, minor=0
)
If the order of the labels matters:
metrics = PrometheusMetrics(app)
metrics.info(
'app_info', 'Application info',
('version', 'major', 'minor'),
('1.0', 1, 0)
)
:param name: the name of the metric
:param description: the description of the metric
:param labelnames: the names of the labels
:param labelvalues: the values of the labels
:param labels: the names and values of the labels
:return: the newly created `Gauge` metric
"""
if labels and labelnames:
raise ValueError(
'Cannot have labels defined as `dict` '
'and collections of names and values'
)
if labelnames is None and labels:
labelnames = labels.keys()
elif labelnames and labelvalues:
for idx, label_name in enumerate(labelnames):
labels[label_name] = labelvalues[idx]
gauge = Gauge(
name, description, labelnames,
registry=self.registry
)
if labels:
gauge = gauge.labels(**labels)
gauge.set(1)
return gauge
__version__ = '0.3.1'
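# Illustrative usage sketch (not part of the original module): wiring the
# decorators defined above into a small Flask app, assuming the PrometheusMetrics
# wrapper shown in the info() docstring. The route and metric names below are
# hypothetical demo values.
if __name__ == '__main__':
    from flask import Flask

    app = Flask(__name__)
    metrics = PrometheusMetrics(app)

    @app.route('/ping')
    @metrics.counter('ping_requests_total', 'Total calls to /ping',
                     labels={'status': lambda r: r.status_code})
    @metrics.histogram('ping_request_latency_seconds', 'Latency of /ping calls')
    def ping():
        return 'pong'

    app.run(port=5000)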
|
gui.py
|
#!/usr/bin/env python
from __future__ import division
import sys,os, os.path, PyMC
import PyMC.database
try:
import pygtk
pygtk.require("2.0")
# Note for OS X: You can get pygtk from the MacPort py-gtk (Python 2.4) or py25-gtk (Python 2.5)...
# but the MacPorts py-gobject is buggy. I reported it on their trac page.
except:
pass
try:
import gtk, gobject
import gtk.glade
except:
sys.exit(1)
import re
handler_re = re.compile(r'(on|after)_(.*)_(.*)')
from threading import Thread
def progress_timeout(self):
# Calculate the value of the progress bar
if self.sampler.status == 'running':
self.pbar.set_fraction(self.sampler._current_iter/self.sampler._iter)
elif self.sampler.status == 'ready':
self.pbar.set_fraction(0.)
self.button2.set_label('Start')
self.button2.set_image(gtk.image_new_from_stock('gtk-yes', gtk.ICON_SIZE_BUTTON))
return False
else: # Sampling is interrupted.
return False
# As this is a timeout function, return TRUE so that it
# continues to get called
return True
class GladeWidget:
def __init__(self, glade_file, widget_name):
"""Connects signal handling methods to Glade widgets.
Methods named like on_widget__signal or after_widget__signal
are connected to the appropriate widgets and signals.
"""
get_widget = gtk.glade.XML(glade_file, widget_name).get_widget
W = {}
for attr in dir(self):
match = handler_re.match(attr)
if match:
when, widget, signal = match.groups()
method = getattr(self, attr)
assert callable(method)
if when == 'on':
get_widget(widget).connect(signal, method)
elif when == 'after':
get_widget(widget).connect_after(signal, method)
W[widget]=get_widget(widget)
elif attr.startswith('on_') or attr.startswith('after_'):
# Warn about some possible typos like separating
# widget and signal name with _ instead of __.
print ('Warning: attribute %r not connected'
' as a signal handler' % (attr,))
self.__dict__.update(W)
self.get_widget = get_widget
for db in PyMC.database.available_modules:
self.combobox1.append_text(db)
self.combobox1.set_active(0)
def on_window1_destroy(self, widget):
gtk.main_quit()
def on_button1_clicked(self, widget):
dialog = gtk.FileChooserDialog("Open python module",
None,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
filter = gtk.FileFilter()
filter.set_name("Python files")
filter.add_pattern("*.py")
dialog.add_filter(filter)
dialog.set_filename('ExtremeRainfall.py')
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.filename= dialog.get_filename()
self.modulename = os.path.splitext(os.path.basename(self.filename))[0]
sys.path.append(os.path.dirname(self.filename))
mod = __import__(self.modulename)
db = self.combobox1.get_active_text()
self.sampler = PyMC.Sampler(mod, db=db)
elif response == gtk.RESPONSE_CANCEL:
print 'Closed, no files selected'
dialog.destroy()
self.button1.set_label(self.modulename)
def on_spinbuttonit_changed(self, widget):
pass
def on_spinbuttonburn_changed(self, widget):
pass
def on_spinbuttonthin_changed(self, widget):
pass
def on_combobox1_changed(self, widget):
"""Close last database and assign new one."""
db = widget.get_active_text()
try:
self.sampler.db._finalize()
self.sampler._assign_database_backend(db)
except AttributeError:
pass
def on_button2_clicked(self, widget):
self.pbar = self.get_widget('progressbar1')
# Not started, not sampling
if self.sampler.status =='ready':
self.iter = int(self.spinbuttonit.get_value())
self.burn = int(self.spinbuttonburn.get_value())
self.thin = int(self.spinbuttonthin.get_value())
#self.pbar.set_fraction(0.0)
self.T = Thread(target=self.sampler.sample, args=(self.iter, self.burn, self.thin))
self.T.start()
self.timer = Thread(target=gobject.timeout_add, args= (100, progress_timeout, self))
self.timer.start()
# Change label to stop
widget.set_label('Stop')
widget.set_image(gtk.image_new_from_stock('gtk-stop', gtk.ICON_SIZE_BUTTON))
elif self.sampler.status == 'running':
self.sampler.status = 'paused'
widget.set_label('Continue')
widget.set_image(gtk.image_new_from_stock('gtk-yes', gtk.ICON_SIZE_BUTTON))
elif self.sampler.status == 'paused':
self.sampler.interactive_continue()
widget.set_label('Stop')
widget.set_image(gtk.image_new_from_stock('gtk-stop', gtk.ICON_SIZE_BUTTON))
self.timer = Thread(target=gobject.timeout_add, args= (100, progress_timeout, self))
self.timer.start()
if __name__ == "__main__":
hwg = GladeWidget('gui.glade', 'window1')
gtk.gdk.threads_init()
gtk.main()
|
UTMaster.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
Authors: zhousongsong([email protected])
Date: 2015/11/16 14:07:06
"""
import os
import sys
import Queue
import threading
broc_path = os.path.realpath(os.path.join(os.path.realpath(__file__), '..', '..'))
sys.path.insert(0, broc_path)
from util import Function
class UTMaster(object):
"""
UTMaster dispatches ut command to ut threads
"""
def __init__(self, queue, logger):
"""
Args:
queue : the ut command queue
logger : the Log.Log() object
"""
self._queue = queue
self._logger = logger
self._errors = list()
def Run(self):
"""
thread entrance function
"""
while not self._queue.empty():
try:
cmd = self._queue.get(True, 1)
except Queue.Empty:
break
ret, msg = Function.RunCommand(cmd, True)
if ret != 0:
self._logger.LevPrint("ERROR", "run ut cmd(%s) failed: %s" % (cmd, msg))
self._errors.append(msg)
else:
self._logger.LevPrint("MSG", "run ut cmd(%s) OK\n%s" % (cmd, msg))
self._queue.task_done()
def Start(self):
"""
run all ut threads
"""
num = 4
if self._queue.qsize() < 4:
num = self._queue.qsize()
workers = list()
for i in xrange(0, num):
t = threading.Thread(target=self.Run)
workers.append(t)
t.start()
# wait for all ut commands to finish
self._queue.join()
# wait all ut threads exit
for worker in workers:
worker.join()
def Errors(self):
"""
return all error msg
"""
return self._errors
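# Illustrative usage sketch (not part of the original module): drive UTMaster
# directly with a couple of shell commands. The stub logger below is
# hypothetical; the real project passes its own Log.Log() object, and the
# commands are demo values.
if __name__ == '__main__':
    class _StubLogger(object):
        def LevPrint(self, level, msg):
            print('[%s] %s' % (level, msg))

    cmds = Queue.Queue()
    for cmd in ['echo unit-test-1', 'echo unit-test-2']:
        cmds.put(cmd)
    master = UTMaster(cmds, _StubLogger())
    master.Start()
    print('ut errors: %s' % master.Errors())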
|
pilogee.py
|
import socket
import time
import threading
import traceback
from message_socket.message_socket import MessageSocket, MessageType, PiLogMsg
class PiLoggee():
def __init__(self):
""" """
self._client_lock = threading.Lock()
self._num_clients = 0
self._running = False
def start_server(self):
""" """
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind(("0.0.0.0", 12346))
self._running = True
self._listen_thread = threading.Thread(target=self.listen)
self._listen_thread.start()
def stop_server(self):
print("Received shutdown signal")
if self._running:
self._running = False
self._socket.shutdown(socket.SHUT_RDWR)
def handle_client(self, args):
""" """
self.increment_clients()
(client, addr) = args
client_sock = MessageSocket(client)
try:
msg = client_sock.recv_message()
while(msg.type != MessageType.DISCONNECT and self._running):
self.handle_message(client, msg)
msg = client_sock.recv_message()
if not self._running:
client_sock.close()
else:
print("Client {0} has disconneccted".format(addr[0]))
except:
print("Error handling client in PiLogee.work. Disconnecting from client {0}".format(addr[0]))
traceback.print_exc()
self.decrement_clients()
def increment_clients(self):
with self._client_lock:
self._num_clients += 1
def decrement_clients(self):
with self._client_lock:
self._num_clients -= 1
def handle_message(self, client, msg):
""" handles a message received by a client """
print("from client {0}, payload: {1}".format(client, msg.payload))
def listen(self):
""" listens for clients """
try:
while(self._running):
self._socket.listen(5)
(client, addr) = self._socket.accept()
clientThread = threading.Thread(target=self.handle_client, args=((client, addr),))
clientThread.start()
except:
if self._running:
print("Exception in listen.")
traceback.print_exc()
else:
print("Shutting down")
|
__init__.py
|
'''
Polyglot HTTP server
| Public Functions:
| AbstractHandler Class
| register(handlers, subdomain=None, parent_dir=None)
:ivar AUTH_USER: The required HTTP user
:ivar AUTH_PASS: The required HTTP password
:ivar SERVER: The Tornado web server application.
'''
from basic_auth import basic_auth
import logging
import os
import threading
import tornado.web
import tornado.ioloop
DEFAULT_CONFIG = {'port': 8080, 'username': 'admin', 'password': 'admin'}
AUTH_USER = None
AUTH_PASS = None
PORT = None
SERVER = None
_THREAD = None
_LOGGER = logging.getLogger(__name__)
def load(pglot, user_config):
''' setup the http server '''
# pylint: disable=global-statement, unused-argument
global SERVER, _THREAD
# set configuration
config = DEFAULT_CONFIG
config.update(user_config)
set_config(config)
# create server
SERVER = tornado.web.Application([], {})
SERVER.listen(PORT)
# run server on a thread
_THREAD = threading.Thread(target=run_server)
_THREAD.daemon = True
_THREAD.start()
_LOGGER.info('Started HTTP server on port %d', PORT)
_LOGGER.info('Loaded HTTP element')
def unload():
''' stops the http server '''
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.add_callback(lambda x: x.stop(), ioloop)
_LOGGER.info('Unloaded HTTP element')
def get_config():
""" Returns the element's configuration. """
return {'password': AUTH_PASS, 'username': AUTH_USER, 'port': PORT}
def set_config(config):
""" Updates the current configuration. """
# pylint: disable=global-statement, unused-argument
global AUTH_USER, AUTH_PASS, PORT
# pull config settings
PORT = config['port']
AUTH_USER = config['username']
AUTH_PASS = config['password']
def register(handlers=None, subdomain=None, parent_dir=None, urls=None):
'''
Register additional handlers to the server.
:param handlers: List of handler classes to register.
:param subdomain: The desired subdomain
:param parent_dir: The directory under which all the handlers should be
placed
:param urls: List of lists like [['path', Handler]].
Overwrites handlers input.
'''
# parse input
if subdomain is None:
subdomain = "[A-Za-z0-9.]*"
else:
subdomain = r'{}\.[A-Za-z0-9.]*'.format(subdomain)
if parent_dir is None:
parent_dir = ''
else:
parent_dir = '/{}'.format(parent_dir)
# create proper URLSpec handlers and callback
if not urls:
def doc_url(handler):
""" reads the url regexp from the handler's docstring. """
docs = handler.__doc__
pieces = docs.strip().split('\n\n')[0].split('\n')
return ''.join([piece.strip() for piece in pieces])
handlers = [tornado.web.URLSpec('{}{}'.format(parent_dir,
doc_url(handler)),
handler)
for handler in handlers]
else:
handlers = urls
def add_handler_callback(server, subdmn, hndls):
""" add handler to server """
server.add_handlers(subdmn, hndls)
# schedule handler addition to next IOLoop iteration
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.add_callback(add_handler_callback, SERVER, subdomain, handlers)
def run_server():
''' run the tornado web server '''
tornado.ioloop.IOLoop.instance().start()
def authenticate(username, password):
'''
Authenticate the credentials.
:param username: Supplied username
:param password: Supplied password
'''
if AUTH_USER is None:
return True
return username == AUTH_USER and password == AUTH_PASS
@basic_auth(authenticate)
class AbstractHandler(tornado.web.RequestHandler):
''' An abstract request handler with authentication '''
def get(self):
''' Get handler '''
self.write('Polyglot is Running')
self.finish()
def data_received(self, chunk):
''' Overwriting abstract method. '''
pass
@basic_auth(authenticate)
class AuthStaticFileHandler(tornado.web.StaticFileHandler):
""" Static file handler with authentication. """
def data_received(self, chunk):
""" Overwriting abstract method. """
pass
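# Illustrative usage sketch (not part of the original module): start the HTTP
# element with the default config and register one extra handler. Per
# register()/doc_url() above, the first docstring paragraph of the handler is
# read as its URL pattern. The handler name and the sleep loop are hypothetical.
if __name__ == '__main__':
    import time

    class HelloHandler(AbstractHandler):
        ''' /hello

        Replies with a plain-text greeting. '''
        def get(self):
            self.write('Hello from Polyglot')
            self.finish()

    load(None, {})  # the pglot argument is unused by load()
    register(handlers=[HelloHandler])
    time.sleep(60)  # keep the daemon server thread alive for a while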
|
batching.py
|
# Copyright 2021 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading as td
import time
import traceback
from collections import defaultdict
from http import HTTPStatus
from typing import Any, Callable, Dict, List
from starlette.responses import Response
from ..exceptions import UserRuntimeException
from ..log import logger
class DynamicBatcher:
def __init__(self, predictor_impl: Callable, max_batch_size: int, batch_interval: int):
self.predictor_impl = predictor_impl
self.batch_max_size = max_batch_size
self.batch_interval = batch_interval # measured in seconds
# waiter prevents new threads from modifying the input batch while a batch prediction is in progress
self.waiter = td.Event()
self.waiter.set()
self.barrier = td.Barrier(self.batch_max_size + 1, action=self.waiter.clear)
self.samples = {}
self.predictions = {}
td.Thread(target=self._batch_engine).start()
def _batch_engine(self):
while True:
if len(self.predictions) > 0:
time.sleep(0.001)
continue
try:
self.barrier.wait(self.batch_interval)
except td.BrokenBarrierError:
pass
self.predictions = {}
try:
if self.samples:
batch = self._make_batch(self.samples)
predictions = self.predictor_impl.predict(**batch)
if not isinstance(predictions, list):
raise UserRuntimeException(
f"please return a list when using server side batching, got {type(predictions)}"
)
self.predictions = dict(zip(self.samples.keys(), predictions))
except Exception as e:
self.predictions = {thread_id: e for thread_id in self.samples}
logger.error(traceback.format_exc())
finally:
self.samples = {}
self.barrier.reset()
self.waiter.set()
@staticmethod
def _make_batch(samples: Dict[int, Dict[str, Any]]) -> Dict[str, List[Any]]:
batched_samples = defaultdict(list)
for thread_id in samples:
for key, sample in samples[thread_id].items():
batched_samples[key].append(sample)
return dict(batched_samples)
def _enqueue_request(self, **kwargs):
"""
Enqueue sample for batch inference. This is a blocking method.
"""
thread_id = td.get_ident()
self.waiter.wait()
self.samples[thread_id] = kwargs
try:
self.barrier.wait()
except td.BrokenBarrierError:
pass
def predict(self, **kwargs):
"""
Queues a request to be batched with other incoming requests, waits for the response
and returns the prediction result. This is a blocking method.
"""
self._enqueue_request(**kwargs)
prediction = self._get_prediction()
return prediction
def _get_prediction(self) -> Any:
"""
Return the prediction. This is a blocking method.
"""
thread_id = td.get_ident()
while thread_id not in self.predictions:
time.sleep(0.001)
prediction = self.predictions[thread_id]
del self.predictions[thread_id]
if isinstance(prediction, Exception):
return Response(
content=str(prediction),
status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
media_type="text/plain",
)
return prediction
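# Illustrative usage sketch (not part of the original module): a dummy predictor
# exercising DynamicBatcher from several threads. The predictor class and the
# "payload" keyword are hypothetical; note that the batching engine thread
# started in __init__ is non-daemon, so the process keeps running after the
# workers finish.
if __name__ == "__main__":
    class EchoPredictor:
        def predict(self, payload: List[Any]) -> List[Any]:
            # receives one list per keyword, with one entry per queued request
            return [f"echo:{p}" for p in payload]

    batcher = DynamicBatcher(EchoPredictor(), max_batch_size=4, batch_interval=1)

    def worker(i: int):
        print(batcher.predict(payload=i))

    workers = [td.Thread(target=worker, args=(i,)) for i in range(4)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()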
|
test_pool.py
|
#!/usr/bin/env python
# --coding:utf-8--
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
import sys
import os
import threading
import time
current_dir = os.path.dirname(os.path.abspath(__file__))
root_dir = os.path.join(current_dir, '..')
sys.path.insert(0, root_dir)
from unittest import TestCase
from nebula2.gclient.net import ConnectionPool
from nebula2.Config import Config
from nebula2.Exception import (
NotValidConnectionException,
InValidHostname,
IOErrorException
)
class TestConnectionPool(TestCase):
@classmethod
def setup_class(self):
self.addresses = list()
self.addresses.append(('127.0.0.1', 9669))
self.addresses.append(('127.0.0.1', 9670))
self.configs = Config()
self.configs.min_connection_pool_size = 2
self.configs.max_connection_pool_size = 4
self.pool = ConnectionPool()
assert self.pool.init(self.addresses, self.configs)
assert self.pool.connnects() == 2
def test_right_hostname(self):
pool = ConnectionPool()
assert pool.init([('localhost', 9669)], Config())
def test_wrong_hostname(self):
pool = ConnectionPool()
try:
pool.init([('wrong_host', 9669)], Config())
assert False
except InValidHostname:
assert True
def test_ping(self):
assert self.pool.ping(('127.0.0.1', 9669))
assert self.pool.ping(('127.0.0.1', 5000)) is False
def test_init_failed(self):
# init succeeded
pool1 = ConnectionPool()
addresses = list()
addresses.append(('127.0.0.1', 9669))
addresses.append(('127.0.0.1', 9670))
assert pool1.init(addresses, Config())
# init failed, connected failed
pool2 = ConnectionPool()
addresses = list()
addresses.append(('127.0.0.1', 3800))
try:
pool2.init(addresses, Config())
assert False
except Exception:
assert True
# init failed, hostname not existed
try:
pool3 = ConnectionPool()
addresses = list()
addresses.append(('not_exist_hostname', 3800))
assert not pool3.init(addresses, Config())
except InValidHostname:
assert True, "We expected get the exception"
def test_get_session(self):
# get session succeeded
sessions = list()
for num in range(0, self.configs.max_connection_pool_size):
session = self.pool.get_session('root', 'nebula')
resp = session.execute('SHOW SPACES')
assert resp.is_succeeded()
sessions.append(session)
# get session failed
try:
self.pool.get_session('root', 'nebula')
except NotValidConnectionException:
assert True
assert self.pool.in_used_connects() == 4
# release session
for session in sessions:
session.release()
assert self.pool.in_used_connects() == 0
# test get session after release
for num in range(0, self.configs.max_connection_pool_size - 1):
session = self.pool.get_session('root', 'nebula')
resp = session.execute('SHOW SPACES')
assert resp.is_succeeded()
sessions.append(session)
assert self.pool.in_used_connects() == 3
def test_stop_close(self):
session = self.pool.get_session('root', 'nebula')
assert session is not None
resp = session.execute('SHOW SPACES')
assert resp.is_succeeded()
self.pool.close()
try:
new_session = self.pool.get_session('root', 'nebula')
except NotValidConnectionException:
assert True
except Exception as e:
assert False, "We don't expect reach here:{}".format(e)
try:
session.execute('SHOW SPACES')
except IOErrorException:
assert True
except Exception as e:
assert False, "We don't expect reach here:".format(e)
def test_multi_thread():
# Test multi thread
addresses = [('127.0.0.1', 9669), ('127.0.0.1', 9670)]
configs = Config()
configs.max_connection_pool_size = 4
pool = ConnectionPool()
assert pool.init(addresses, configs)
global success_flag
success_flag = True
def main_test():
session = None
global success_flag
try:
session = pool.get_session('root', 'nebula')
if session is None:
success_flag = False
return
space_name = 'space_' + threading.current_thread().getName()
session.execute('DROP SPACE %s' % space_name)
resp = session.execute('CREATE SPACE IF NOT EXISTS %s' % space_name)
if not resp.is_succeeded():
raise RuntimeError('CREATE SPACE failed: {}'.format(resp.error_msg()))
time.sleep(3)
resp = session.execute('USE %s' % space_name)
if not resp.is_succeeded():
raise RuntimeError('USE SPACE failed:{}'.format(resp.error_msg()))
except Exception as x:
print(x)
success_flag = False
return
finally:
if session is not None:
session.release()
thread1 = threading.Thread(target=main_test, name='thread1')
thread2 = threading.Thread(target=main_test, name='thread2')
thread3 = threading.Thread(target=main_test, name='thread3')
thread4 = threading.Thread(target=main_test, name='thread4')
thread1.start()
thread2.start()
thread3.start()
thread4.start()
thread1.join()
thread2.join()
thread3.join()
thread4.join()
pool.close()
assert success_flag
|
mplsmall_main.py
|
from PyQt5.QtWidgets import *
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from PyQt5 import QtCore
import threading
from PyQt5.Qt import QPoint, QRect
class mplsmall_main(QWidget):
def __init__(self,parent=None):
QWidget.__init__(self)
self.setMaximumHeight(60)
self.verticalLayout = QVBoxLayout(self)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.scrollArea = QScrollArea(self)
self.scrollArea.setWidgetResizable(False)
self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.figure = plt.figure() #QWidget //FigureCanvas//Figure// subplot
self.axes = self.figure.add_subplot(1, 1, 1)
self.canvas = FigureCanvas(self.figure) # FigureCanvas//Figure// called canvas
self.figure.set_facecolor("black")
self.scrollArea.setSizePolicy(QSizePolicy.Minimum,QSizePolicy.Maximum)
self.scrollArea.setContentsMargins(0, 0, 0, 0)
self.scrollArea.setStyleSheet("border:0px;padding:0px")
self.scrollArea.setWidget(self.canvas)
self.verticalLayout.addWidget(self.scrollArea)
self.canvas.mpl_connect('draw_event',self.On_Canvas_drawn)
self.axes_list=[]
self.plot_list=[]
self.line_list=[]
#self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
#self.canvas.mpl_connect('key_press_event', self.mpEvent)
self.canvas.mpl_connect('button_press_event', self.mouse_pressed)
self.canvas.mpl_connect('button_release_event', self.mouse_released)
self.canvas.mpl_connect('motion_notify_event',self.mouse_in_motion)
self.is_clicked=False
self.green_clicked=False
self.rescale_done_by_selection=False
#####################################events############################################
def resizeEvent(self,event):
QWidget.resizeEvent(self, event)
print("small Win resized")
def showEvent(self, event):
QWidget.showEvent(self, event)
print("shown_small")
def On_Canvas_drawn(self,draw_event):
print("Draw_evt_on small")
pass
def mouse_pressed(self, e):
if self.main.small_view_start_pos<e.x and e.x<self.main.small_view_end_pos:
#print("clk_inside")
self.click_pos=e.x
self.Scroll_val_at_clicked=self.main.MplWidget.scrollArea.horizontalScrollBar().value()
main_c_width=self.main.MplWidget.canvas.size().width()
self.width_ratio=main_c_width/self.canvas.size().width()
self.is_clicked=True
#print("CLICKED_VAL:",self.Scroll_val_at_clicked)
elif e.x>self.main.CoordMin and e.x<self.main.CoordMax and self.main.plotted_out_of_range:
self.canvas_width=self.canvas.size().width()
self.click_pos=e.x
self.green_clicked=True
def mouse_released(self,e):
self.is_clicked=False
if self.green_clicked==True:
mid_pt=((self.main.CoordMin+self.main.CoordMax)/2)+self.change_in_pos###start_pos+(end_pos-start_pos)/2
self.reseted_mid=self.main.mplsmall_px2pt.transform((mid_pt,0))[0]
self.green_clicked=False
self.rescale_done_by_selection=True
if hasattr(self, "scale_thread")==True:
if self.scale_thread.is_alive():
self.scale_thread.cancel()
self.scale_thread=threading.Thread(target=self.main.rescale_x)
self.scale_thread.start()
def mouse_in_motion(self,e):
if self.is_clicked==True:
#print("CLICKED_VAL:",self.Scroll_val_at_clicked,"pos:",e.x,"change_in_pos:",(e.x-self.click_pos))
change_in_pos=(e.x-self.click_pos)*self.width_ratio
self.main.MplWidget.scrollArea.horizontalScrollBar().setValue(self.Scroll_val_at_clicked+change_in_pos)
elif self.green_clicked==True:
change_in_pos=(e.x-self.click_pos)
if (self.main.CoordMin+change_in_pos)>0 and (self.main.CoordMax+change_in_pos)<self.canvas_width:
self.change_in_pos=change_in_pos
self.rubberbands_draw_shifted(self.change_in_pos)
#######################################PLOT#################################################
def addplot_(self,x_,y_,ch_color,xlimit):
ax=self.axes.twiny()
plot_ = ax.plot(x_,y_,color=ch_color)
ax.set_yticklabels([])
ax.set(xlim=xlimit,autoscale_on=False)
for i in range(len(self.axes_list)):
self.axes_list[i].set(xlim=xlimit,autoscale_on=False)
self.axes.set(xlim=xlimit,autoscale_on=False)
self.axes_list.append(ax)
self.plot_list.append(plot_)
self.line_list.append(plot_[0])
def change_color(self,lineno,col,):
self.axes_list[lineno].get_lines()[lineno].set_color(col)
def edit_plot(self,plot_num,x_,y_,ch_color,x_limit):
self.axes_list[plot_num].clear()
plot_=self.axes_list[plot_num].plot(x_,y_, color=ch_color)
self.axes_list[plot_num].set_yticklabels([])
if self.main.plotted_out_of_range==False:
self.axes_list[plot_num].set_xlim(x_limit[0],x_limit[1])
self.axes.set_xlim(x_limit[0],x_limit[1])
else:
self.axes_list[plot_num].set_xlim(self.axes.get_xlim())
#print("small limit:",x_limit[0],x_limit[1])
self.plot_list[plot_num]=plot_
self.line_list[plot_num]=plot_[0]
def rem_plot_0(self):
self.axes_list[0].clear()
self.axes_list=[]
self.plot_list=[]
self.line_list=[]
def refresh_plot(self):
draw_thrd = threading.Thread(target=self.canvas.draw_idle)  # pass the callable; calling it here would draw synchronously and hand None to Thread
draw_thrd.start()
def init_fig(self,parent):
self.main=parent
#self.axes = self.figure.add_subplot(1, 1, 1)
self.axes.set_in_layout(False)
self.axes.patch.set_facecolor('xkcd:black')
self.axes.set_yticklabels([])
self.figure.tight_layout(pad=0, w_pad=None, h_pad=0)
self.axes.set_ylim(self.main.main_ylim)
############################################Redraw_Rubberbands########################################
def rubberbands_draw_shifted(self,ch_in_pos):
self.main.rubberBand_red.hide()
self.main.rubberBand_red1.hide()
self.main.rubberBand.setGeometry(QRect(QPoint(int(self.main.CoordMin+self.change_in_pos),0),QPoint(int(self.main.small_view_start_pos+self.change_in_pos),60)))
self.main.rubberBand1.setGeometry(QRect(QPoint(int(self.main.small_view_end_pos+self.change_in_pos),0),QPoint(int(self.main.CoordMax+self.change_in_pos),60)))
#changelog
#changed on plot rescaled to main plot when rescaled out of range which is unwanted in case of smallplot
'''def edit_plot(self,plot_num,x_,y_,ch_color,x_limit):
self.axes_list[plot_num].clear()
plot_=self.axes_list[plot_num].plot(x_,y_, color=ch_color)
self.axes_list[plot_num].set_yticklabels([])
self.axes_list[plot_num].set_xlim(x_limit[0],x_limit[1])
self.axes.set_xlim(x_limit[0],x_limit[1])
print("small limit:",x_limit[0],x_limit[1])'''
|
socks5_server.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__created__ = '2018/03/09'
__author__ = 'aspiral'
import logging
import platform
import struct
from socket import *
from selectors import *
import select
import threading
SOCKS_VERSION_5 = b'\x05'
METHOD_NO_AUTH = b'\x00'
METHOD_GSSAPI = b'\x01'
METHOD_USERPWD = b'\x02'
METHOD_IANA = b'\x03'
METHOD_PERSONAL = b'\x80'
METHOD_FAILURE = b'\xFF'
CMD_CONNECT = b'\x01'
CMD_BIND = b'\x02'
CMD_UDP = b'\x03'
RSV = b'\x00'
ATYP_IPV4 = b'\x01'
ATYP_HOSTNAME = b'\x03'
ATYP_IPV6 = b'\x04'
REP_SUCCESS = b'\x00'
REP_FAILURE = b'\x01'
REP_RULE_UNCONNECT = b'\x02'
REP_NET_NOT_ARRIVE = b'\x03'
REP_HOST_NOT_ARRIVE = b'\x04'
REP_CONNECT_REFUSE = b'\x05'
REP_TTL_TIMEOUT = b'\x06'
REP_UNAVAILABLE_CMD = b'\x07'
REP_UNAVAILABLE_ATYP = b'\x08'
# Choose the log path based on the OS: on Windows the log is kept in the parent directory's .log
# On Linux and other systems logs are stored under .log for unified management
if platform.system() == 'Windows':
filename = '.log/socks5_server.log'
else:
filename = '.log/socks5_server.log'
# Log message format
FORMAT = '[%(asctime)s] [%(levelname)s] [%(thread)d] %(message)s'
logging.basicConfig(filename=filename, level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
# Send all of the data
def send_all(sock, data):
"""
Send data.
:param sock: the destination connection object to send data through
:param data: the data to send
"""
bytes_sent = 0
while True:
r = sock.send(data[bytes_sent:])
if r < 0:
return r
bytes_sent += r
if bytes_sent == len(data):
return bytes_sent
def handle_tcp(client, remote, client_ip, remote_ip):
"""
Relay data between the client and the target address.
:param client: the client socket object
:param remote: the remote socket object
:param client_ip: the client's IPv4 address
:param remote_ip: the target's IPv4 address
:return:
"""
logger.info("Sending data between %s and %s !!!" % (client_ip, remote_ip))
try:
fds = [client, remote]
while True:
r, w, e = select.select(fds, [], [])
if client in r:
cli_data = client.recv(1024 * 100)
if len(cli_data) <= 0:
break
result = send_all(remote, cli_data)
if result < len(cli_data):
logger.error("Failed pipping all data to %s !!!" % remote_ip)
break
if remote in r:
remote_data = remote.recv(1024 * 100)
if len(remote_data) <= 0:
break
result = send_all(client, remote_data)
if result < len(remote_data):
logger.error("Failed pipping all data to %s !!!" % client_ip)
break
finally:
client.close()
remote.close()
logger.info("Piping data between %s and %s are done!" % (client_ip, remote_ip))
def handle_client_connect(conn):
"""
Handle a client connection request.
:param conn:
:return:
"""
client, client_addr = conn.accept()
logger.info("Client connected with IP: %s", client_addr)
# Read the SOCKS version and the number of methods; METHODS is the list of auth methods the client supports
ver, methods = client.recv(1), client.recv(1)
# Read METHODS according to its length
methods = client.recv(ord(methods))
# Tell the client the server version and the supported auth method
client.send(SOCKS_VERSION_5 + METHOD_NO_AUTH)
# Receive the SOCKS version, the command code and the address type
ver, cmd, rsv, atype = client.recv(1), client.recv(1), client.recv(1), client.recv(1)
# If cmd is not a CONNECT request
if ord(cmd) != 1:
client.close()
return
# Address type: IPv4
if ord(atype) == 1:
remote_addr = inet_ntoa(client.recv(4))
remote_port = struct.unpack(">H", client.recv(2))[0]
# Address type: domain name
elif ord(atype) == 3:
addr_len = ord(client.recv(1))
remote_addr = client.recv(addr_len)
remote_port = struct.unpack(">H", client.recv(2))[0]
# Otherwise refuse the connection
else:
reply = SOCKS_VERSION_5 + REP_UNAVAILABLE_ATYP + RSV + ATYP_IPV4 + inet_aton(
"0.0.0.0") + struct.pack(">H", 2222)
logger.info("IP version is not IPV4, refused connect......")
client.send(reply)
client.close()
return
remote = socket(AF_INET, SOCK_STREAM)
try:
remote.connect((remote_addr, remote_port))
except TimeoutError:
logger.error("Connect time out with remote addr : %s " % remote_addr)
logger.info("Connect to remote addr : %s:%s" % (remote_addr, remote_port))
# Request succeeded, keep sending data
reply = SOCKS_VERSION_5 + REP_SUCCESS + RSV + ATYP_IPV4 + inet_aton(
"0.0.0.0") + struct.pack(">H", 2222)
client.send(reply)
# Relay data between the client and the remote host
handle_tcp(client, remote, client_addr, remote_addr)
def start_server(ip_addr: str, ip_port: int):
"""
:param ip_addr: the IP address to bind
:param ip_port: the port to bind
:return: the server socket object
"""
server = socket(AF_INET, SOCK_STREAM)
server.bind((ip_addr, ip_port))
server.listen(1000)
logger.info("Socks5 server listening on server: %s : %d ......" % (ip_addr, ip_port))
return server
def thread_socks_connect(conn):
"""
Handle the connection in a new thread.
:param conn: a connection
:return:
"""
t = threading.Thread(target=handle_client_connect, args=(conn,))
t.start()
logger.info("Socks start a new session!")
def loop_forever(server):
"""
I/O multiplexing loop.
:param server: the listening server socket
:return:
"""
selector = DefaultSelector()
selector.register(server, EVENT_READ, thread_socks_connect)
while True:
ready = selector.select()
for key, event in ready:
callback = key.data
callback(key.fileobj)
if __name__ == '__main__':
started_server = start_server('0.0.0.0', 10030)
loop_forever(started_server)
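# Illustrative client sketch (not part of the original module), kept as a
# comment: the byte sequence a client would send to perform the no-auth SOCKS5
# handshake against the server above and CONNECT to example.com:80. The target
# host and port are arbitrary demo values.
#
#   cli = socket(AF_INET, SOCK_STREAM)
#   cli.connect(('127.0.0.1', 10030))
#   cli.sendall(SOCKS_VERSION_5 + b'\x01' + METHOD_NO_AUTH)    # VER, NMETHODS, METHODS
#   assert cli.recv(2) == SOCKS_VERSION_5 + METHOD_NO_AUTH
#   host = b'example.com'
#   cli.sendall(SOCKS_VERSION_5 + CMD_CONNECT + RSV + ATYP_HOSTNAME +
#               bytes([len(host)]) + host + struct.pack('>H', 80))
#   reply = cli.recv(10)                                        # VER REP RSV ATYP BND.ADDR BND.PORT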
|
Hiwin_RT605_ArmCommand_Socket_20190627174605.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
Socket = 0
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # do not raise StopIteration explicitly; under PEP 479 it would surface as RuntimeError
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent by the strategy side
pos.x = '%s'%x
pos.y = '%s'%y
pos.z = '%s'%z
pos.pitch = '%s'%pitch
pos.roll = '%s'%roll
pos.yaw = '%s'%yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm mode data sent by the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%action)
socket_cmd.grip = int('%s'%grip)
socket_cmd.ra = int('%s'%ra)
socket_cmd.setvel = int('%s'%setvel)
socket_cmd.setboth = int('%s'%setboth)
arm_mode_flag = True
#print("sssss:",socket_cmd.action)
#Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive speed mode data sent by the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
# def Grip_Mode(req): ## receive gripper action data sent by the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
# rospy.loginfo(state)
pub.publish(state)
rate.sleep()
# a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
# s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
# b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
#print ("Ready to connect")
#rospy.spin() ## spin one
##------------ server side end -------
##---------- socket packet transfer --------------##
##--------------- send arm commands over the socket -----------------
def Socket_command():
global Socket,arm_mode_flag
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#------- set arm rapid & safe mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action= 5 ## switch back to the initial mode state
#print(data)
Socket.send(data.encode('utf-8')) # send over the socket; encode the str for transmission
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
##-----------socket client--------
def socket_client():
global Socket
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
# the arm side reports its state
if str(feedback_str[2]) == '48':# F: the arm is Ready and can accept the next motion command
state_feedback.ArmState = 0
# Arm_feedback = 0
# socket_client_arm_state(Arm_feedback)
#print("isbusy false")
if str(feedback_str[2]) == '49':# T: the arm is busy and cannot execute the next motion command
state_feedback.ArmState = 1
# Arm_feedback = 1
# socket_client_arm_state(Arm_feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# 6: the strategy has finished
state_feedback.ArmState = 6
# Arm_feedback = 6
# socket_client_arm_state(Arm_feedback)
print("shutdown")
# check the sent flag
if str(feedback_str[4]) == '48':# returned 0: false
state_feedback.SentFlag = 0
# Socket_sent_flag = False
# socket_client_sent_flag(Socket_sent_flag)
if str(feedback_str[4]) == '49':# returned 1: true
state_feedback.SentFlag = 1
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
##--------------- send arm commands over the socket: end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##------------- socket packet transfer end --------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5 ## switch to the initial mode state
t = threading.Thread(target=thread_test)
t.start() # start the worker thread
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
|
make.py
|
import os
import glob
import time
import shutil
import bpy
import json
import stat
from bpy.props import *
import subprocess
import threading
import webbrowser
import arm.utils
import arm.write_data as write_data
import arm.make_logic as make_logic
import arm.make_renderpath as make_renderpath
import arm.make_world as make_world
import arm.make_state as state
import arm.assets as assets
import arm.log as log
import arm.lib.make_datas
import arm.lib.server
from arm.exporter import ArmoryExporter
import time
try:
import barmory
except ImportError:
pass
exporter = ArmoryExporter()
scripts_mtime = 0 # Monitor source changes
code_parsed = False
profile_time = 0
def compile_shader_pass(res, raw_shaders_path, shader_name, defs):
os.chdir(raw_shaders_path + '/' + shader_name)
# Open json file
json_name = shader_name + '.json'
with open(json_name) as f:
json_file = f.read()
json_data = json.loads(json_file)
fp = arm.utils.get_fp_build()
arm.lib.make_datas.make(res, shader_name, json_data, fp, defs)
path = fp + '/compiled/Shaders'
c = json_data['contexts'][0]
for s in ['vertex_shader', 'fragment_shader', 'geometry_shader', 'tesscontrol_shader', 'tesseval_shader']:
if s in c:
shutil.copy(c[s], path + '/' + c[s].split('/')[-1])
def remove_readonly(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def export_data(fp, sdk_path, is_play=False, is_publish=False, in_viewport=False):
global exporter
wrd = bpy.data.worlds['Arm']
print('\nArmory v{0} ({1})'.format(wrd.arm_version, wrd.arm_commit))
print('OS: ' + arm.utils.get_os() + ', Target: ' + state.target + ', GAPI: ' + arm.utils.get_gapi())
# Clean compiled variants if cache is disabled
build_dir = arm.utils.get_fp_build()
if wrd.arm_cache_shaders == False:
if os.path.isdir(build_dir + '/debug/html5-resources'):
shutil.rmtree(build_dir + '/debug/html5-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/krom-resources'):
shutil.rmtree(build_dir + '/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/debug/krom-resources'):
shutil.rmtree(build_dir + '/debug/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/windows-resources'):
shutil.rmtree(build_dir + '/windows-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/linux-resources'):
shutil.rmtree(build_dir + '/linux-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/osx-resources'):
shutil.rmtree(build_dir + '/osx-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/compiled/Shaders'):
shutil.rmtree(build_dir + '/compiled/Shaders', onerror=remove_readonly)
# Detect camera plane changes
if len(bpy.data.cameras) > 0:
cam = bpy.data.cameras[0]
if state.last_clip_start == 0:
state.last_clip_start = cam.clip_start
state.last_clip_end = cam.clip_end
elif cam.clip_start != state.last_clip_start or cam.clip_end != state.last_clip_end:
if os.path.isdir(build_dir + '/compiled/Shaders'):
shutil.rmtree(build_dir + '/compiled/Shaders', onerror=remove_readonly)
state.last_clip_start = cam.clip_start
state.last_clip_end = cam.clip_end
raw_shaders_path = sdk_path + 'armory/Shaders/'
assets_path = sdk_path + 'armory/Assets/'
export_physics = bpy.data.worlds['Arm'].arm_physics != 'Disabled'
export_navigation = bpy.data.worlds['Arm'].arm_navigation != 'Disabled'
export_ui = bpy.data.worlds['Arm'].arm_ui != 'Disabled'
assets.reset()
# Build node trees
ArmoryExporter.import_traits = []
make_logic.build()
make_world.build()
make_renderpath.build()
# Export scene data
assets.embedded_data = sorted(list(set(assets.embedded_data)))
physics_found = False
navigation_found = False
ui_found = False
ArmoryExporter.compress_enabled = is_publish and wrd.arm_asset_compression
ArmoryExporter.in_viewport = in_viewport
for scene in bpy.data.scenes:
if scene.arm_export:
ext = '.zip' if (scene.arm_compress and is_publish) else '.arm'
asset_path = build_dir + '/compiled/Assets/' + arm.utils.safestr(scene.name) + ext
exporter.execute(bpy.context, asset_path, scene=scene, write_capture_info=state.is_render_anim, play_area=state.play_area)
if ArmoryExporter.export_physics:
physics_found = True
if ArmoryExporter.export_navigation:
navigation_found = True
if ArmoryExporter.export_ui:
ui_found = True
assets.add(asset_path)
if physics_found == False: # Disable physics if no rigid body is exported
export_physics = False
if navigation_found == False:
export_navigation = False
if ui_found == False:
export_ui = False
if wrd.arm_ui == 'Enabled':
export_ui = True
modules = []
if export_physics:
modules.append('physics')
if export_navigation:
modules.append('navigation')
if export_ui:
modules.append('ui')
print('Exported modules: ' + str(modules))
defs = arm.utils.def_strings_to_array(wrd.world_defs)
cdefs = arm.utils.def_strings_to_array(wrd.compo_defs)
print('Shader flags: ' + str(defs))
# Write compiled.glsl
shaders_path = build_dir + '/compiled/Shaders'
if not os.path.exists(shaders_path):
os.makedirs(shaders_path)
write_data.write_compiledglsl(defs + cdefs)
# Write referenced shader passes
if not os.path.isfile(build_dir + '/compiled/Shaders/shader_datas.arm') or state.last_world_defs != wrd.world_defs:
res = {}
res['shader_datas'] = []
for ref in assets.shader_passes:
# Ensure shader pass source exists
if not os.path.exists(raw_shaders_path + '/' + ref):
continue
assets.shader_passes_assets[ref] = []
if ref.startswith('compositor_pass'):
compile_shader_pass(res, raw_shaders_path, ref, defs + cdefs)
# elif ref.startswith('grease_pencil'):
# compile_shader_pass(res, raw_shaders_path, ref, [])
else:
compile_shader_pass(res, raw_shaders_path, ref, defs)
arm.utils.write_arm(shaders_path + '/shader_datas.arm', res)
for ref in assets.shader_passes:
for s in assets.shader_passes_assets[ref]:
assets.add_shader(shaders_path + '/' + s + '.glsl')
for file in assets.shaders_external:
name = file.split('/')[-1].split('\\')[-1]
target = build_dir + '/compiled/Shaders/' + name
if not os.path.exists(target):
shutil.copy(file, target)
state.last_world_defs = wrd.world_defs
# Reset path
os.chdir(fp)
# Copy std shaders
if not os.path.isdir(build_dir + '/compiled/Shaders/std'):
shutil.copytree(raw_shaders_path + 'std', build_dir + '/compiled/Shaders/std')
# Write khafile.js
enable_dce = is_publish and wrd.arm_dce
import_logic = not is_publish and arm.utils.logic_editor_space() != None
write_data.write_khafilejs(is_play, export_physics, export_navigation, export_ui, is_publish, enable_dce, in_viewport, ArmoryExporter.import_traits, import_logic)
# Write Main.hx - depends on write_khafilejs for writing number of assets
scene_name = arm.utils.get_project_scene_name()
resx, resy = arm.utils.get_render_resolution(arm.utils.get_active_scene())
# Import all logic nodes for patching if logic is being edited
if wrd.arm_write_config:
write_data.write_config(resx, resy)
write_data.write_main(scene_name, resx, resy, is_play, in_viewport, is_publish)
if scene_name != state.last_scene or resx != state.last_resx or resy != state.last_resy:
wrd.arm_recompile = True
state.last_resx = resx
state.last_resy = resy
state.last_scene = scene_name
def compile_project(target_name=None, watch=False, patch=False, no_project_file=False):
"""
:param no_project_file: Pass '--noproject' to kha make. In the future assets will be copied.
"""
wrd = bpy.data.worlds['Arm']
fp = arm.utils.get_fp()
os.chdir(fp)
# Set build command
if target_name == None:
target_name = state.target
if target_name == 'native':
target_name = ''
node_path = arm.utils.get_node_path()
khamake_path = arm.utils.get_khamake_path()
kha_target_name = arm.utils.get_kha_target(target_name)
cmd = [node_path, khamake_path, kha_target_name]
ffmpeg_path = arm.utils.get_ffmpeg_path() # Path to binary
if ffmpeg_path != '':
cmd.append('--ffmpeg')
cmd.append(ffmpeg_path) # '"' + ffmpeg_path + '"'
if kha_target_name == 'krom':
cmd.append('-g')
cmd.append('opengl')
if state.in_viewport:
if arm.utils.glsl_version() >= 330:
cmd.append('--shaderversion')
cmd.append('330')
else:
cmd.append('--shaderversion')
cmd.append('110')
else:
cmd.append('-g')
cmd.append(arm.utils.get_gapi())
# Kha defaults to 110 on Linux
is_linux = arm.utils.get_os() == 'linux'
is_native = kha_target_name == 'krom' or kha_target_name == ''
if is_linux and is_native and not state.in_viewport and not arm.utils.get_legacy_shaders():
cmd.append('--shaderversion')
cmd.append('330')
if '_VR' in wrd.world_defs:
cmd.append('--vr')
cmd.append('webvr')
cmd.append('--to')
if (kha_target_name == 'krom' and not state.in_viewport and not state.is_publish) or (kha_target_name == 'html5' and not state.is_publish):
cmd.append(arm.utils.build_dir() + '/debug')
else:
cmd.append(arm.utils.build_dir())
# User defined commands
if wrd.arm_khamake != '':
for s in bpy.data.texts[wrd.arm_khamake].as_string().split(' '):
cmd.append(s)
if patch:
if state.compileproc == None:
cmd.append('--nohaxe')
cmd.append('--noproject')
print("Running: ", cmd)
state.compileproc = subprocess.Popen(cmd, stderr=subprocess.PIPE)
if state.playproc == None:
if state.in_viewport:
mode = 'play_viewport'
else:
mode = 'play'
else:
mode = 'build'
threading.Timer(0.1, watch_patch, [mode]).start()
return state.compileproc
elif watch:
print("Running: ", cmd)
state.compileproc = subprocess.Popen(cmd)
mode = 'publish' if state.is_publish else 'build'
threading.Timer(0.1, watch_compile, [mode]).start()
return state.compileproc
else:
if no_project_file:
cmd.append('--noproject')
print("Running: ", cmd)
return subprocess.Popen(cmd)
def build_project(is_play=False, is_publish=False, is_render=False, is_render_anim=False, in_viewport=False):
global profile_time
profile_time = time.time()
wrd = bpy.data.worlds['Arm']
state.is_render = is_render
state.is_render_anim = is_render_anim
state.is_publish = is_publish
state.in_viewport = in_viewport
# Save blend
if arm.utils.get_save_on_build() and not state.krom_running:
bpy.ops.wm.save_mainfile()
log.clear()
# Set camera in active scene
active_scene = arm.utils.get_active_scene()
if active_scene.camera == None:
for o in active_scene.objects:
if o.type == 'CAMERA':
active_scene.camera = o
break
# Get paths
sdk_path = arm.utils.get_sdk_path()
raw_shaders_path = sdk_path + '/armory/Shaders/'
# Set dir
fp = arm.utils.get_fp()
os.chdir(fp)
# Create directories
sources_path = 'Sources/' + arm.utils.safestr(wrd.arm_project_package)
if not os.path.exists(sources_path):
os.makedirs(sources_path)
# Save external scripts edited inside Blender
write_texts = False
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty:
write_texts = True
break
if write_texts:
area = bpy.context.area
old_type = area.type
area.type = 'TEXT_EDITOR'
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty and os.path.isfile(text.filepath):
area.spaces[0].text = text
bpy.ops.text.save()
area.type = old_type
# Save internal Haxe scripts
for text in bpy.data.texts:
if text.filepath == '' and text.name[-3:] == '.hx':
with open('Sources/' + arm.utils.safestr(wrd.arm_project_package) + '/' + text.name, 'w') as f:
f.write(text.as_string())
# Export data
export_data(fp, sdk_path, is_play=is_play, is_publish=is_publish, in_viewport=in_viewport)
if state.target == 'html5':
w, h = arm.utils.get_render_resolution(arm.utils.get_active_scene())
write_data.write_indexhtml(w, h, is_publish)
# Bundle files from include dir
if os.path.isdir('include'):
dest = '/html5/' if is_publish else '/debug/html5/'
for fn in glob.iglob(os.path.join('include', '**'), recursive=False):
shutil.copy(fn, arm.utils.build_dir() + dest + os.path.basename(fn))
if state.playproc == None:
log.print_progress(50)
def stop_project():
if state.playproc != None:
state.playproc.terminate()
state.playproc = None
def watch_play():
if state.playproc == None:
return
line = b''
while state.playproc != None and state.playproc.poll() == None:
char = state.playproc.stderr.read(1) # Read immediately one by one
if char == b'\n':
msg = str(line).split('"', 1) # Extract message
if len(msg) > 1:
trace = msg[1].rsplit('"', 1)[0]
log.krom_trace(trace)
line = b''
else:
line += char
state.playproc = None
state.playproc_finished = True
log.clear()
def watch_compile(mode):
state.compileproc.wait()
log.print_progress(100)
print('Finished in ' + str(time.time() - profile_time))
if state.compileproc == None: ##
return
result = state.compileproc.poll()
state.compileproc = None
state.compileproc_finished = True
if result == 0:
bpy.data.worlds['Arm'].arm_recompile = False
state.compileproc_success = True
on_compiled(mode)
else:
state.compileproc_success = False
log.print_info('Build failed, check console')
def watch_patch(mode):
state.compileproc.wait()
log.print_progress(100)
state.compileproc = None
state.compileproc_finished = True
on_compiled(mode)
def runtime_to_target(in_viewport):
wrd = bpy.data.worlds['Arm']
if in_viewport or wrd.arm_play_runtime == 'Krom':
return 'krom'
elif wrd.arm_play_runtime == 'Native':
return 'native'
else:
return 'html5'
def get_khajs_path(in_viewport, target):
if in_viewport:
return arm.utils.build_dir() + '/krom/krom.js'
elif target == 'krom':
return arm.utils.build_dir() + '/debug/krom/krom.js'
else: # Browser
return arm.utils.build_dir() + '/debug/html5/kha.js'
def play_project(in_viewport, is_render=False, is_render_anim=False):
global scripts_mtime
global code_parsed
wrd = bpy.data.worlds['Arm']
log.clear()
# Store area
if arm.utils.with_krom() and in_viewport and bpy.context.area != None and bpy.context.area.type == 'VIEW_3D':
state.play_area = bpy.context.area
state.target = runtime_to_target(in_viewport)
# Build data
build_project(is_play=True, is_render=is_render, is_render_anim=is_render_anim, in_viewport=in_viewport)
khajs_path = get_khajs_path(in_viewport, state.target)
if not wrd.arm_cache_compiler or \
not os.path.isfile(khajs_path) or \
assets.khafile_defs_last != assets.khafile_defs or \
state.last_target != state.target or \
state.last_in_viewport != state.in_viewport or \
state.target == 'native':
wrd.arm_recompile = True
state.last_target = state.target
state.last_in_viewport = state.in_viewport
if state.in_viewport:
if arm.utils.get_rp().rp_gi != 'Off' and bpy.app.version < (2, 80, 1):
log.warn('Use Blender 2.8 to run Voxel GI in viewport')
# Trait sources modified
state.mod_scripts = []
script_path = arm.utils.get_fp() + '/Sources/' + arm.utils.safestr(wrd.arm_project_package)
if os.path.isdir(script_path):
new_mtime = scripts_mtime
for fn in glob.iglob(os.path.join(script_path, '**', '*.hx'), recursive=True):
mtime = os.path.getmtime(fn)
if scripts_mtime < mtime:
arm.utils.fetch_script_props(fn) # Trait props
fn = fn.split('Sources/')[1]
fn = fn[:-3] #.hx
fn = fn.replace('/', '.')
state.mod_scripts.append(fn)
wrd.arm_recompile = True
if new_mtime < mtime:
new_mtime = mtime
scripts_mtime = new_mtime
if len(state.mod_scripts) > 0: # Trait props
arm.utils.fetch_trait_props()
# New compile required - traits changed
if wrd.arm_recompile:
state.recompiled = True
if state.krom_running:
# TODO: Unable to live-patch, stop player
bpy.ops.arm.space_stop('EXEC_DEFAULT')
return
if not code_parsed:
code_parsed = True
barmory.parse_code()
else:
code_parsed = False
mode = 'play'
if state.target == 'native':
state.compileproc = compile_project(target_name='--run')
elif state.target == 'krom':
if in_viewport:
mode = 'play_viewport'
state.compileproc = compile_project(target_name='krom')
else: # Browser
state.compileproc = compile_project(target_name='html5')
threading.Timer(0.1, watch_compile, [mode]).start()
else: # kha.js up to date
state.recompiled = False
compile_project(patch=True)
def on_compiled(mode): # build, play, play_viewport, publish
log.clear()
wrd = bpy.data.worlds['Arm']
# Launch project in new window
if mode == 'play':
if wrd.arm_play_runtime == 'Browser':
# Start server
os.chdir(arm.utils.get_fp())
t = threading.Thread(name='localserver', target=arm.lib.server.run)
t.daemon = True
t.start()
html5_app_path = 'http://localhost:8040/' + arm.utils.build_dir() + '/debug/html5'
webbrowser.open(html5_app_path)
elif wrd.arm_play_runtime == 'Krom':
krom_location, krom_path = arm.utils.krom_paths()
os.chdir(krom_location)
args = [krom_path, arm.utils.get_fp_build() + '/debug/krom', arm.utils.get_fp_build() + '/debug/krom-resources']
# TODO: Krom sound freezes on MacOS
if arm.utils.get_os() == 'mac':
args.append('--nosound')
if state.is_render:
args.append('--nowindow')
state.playproc = subprocess.Popen(args, stderr=subprocess.PIPE)
watch_play()
elif mode == 'publish':
sdk_path = arm.utils.get_sdk_path()
target_name = arm.utils.get_kha_target(state.target)
files_path = arm.utils.get_fp_build() + '/' + target_name
if (target_name == 'html5' or target_name == 'krom') and wrd.arm_minify_js:
# Minify JS
minifier_path = sdk_path + '/lib/armory_tools/uglifyjs/bin/uglifyjs'
if target_name == 'html5':
jsfile = files_path + '/kha.js'
else:
jsfile = files_path + '/krom.js'
args = [arm.utils.get_node_path(), minifier_path, jsfile, '-o', jsfile]
proc = subprocess.Popen(args)
proc.wait()
if target_name == 'krom':
# Clean up
mapfile = files_path + '/krom.js.temp.map'
if os.path.exists(mapfile):
os.remove(mapfile)
# Copy Krom binaries
if state.target == 'krom-windows':
krom_location = sdk_path + '/Krom/win32/'
elif state.target == 'krom-linux':
krom_location = sdk_path + '/Krom/linux/'
else:
krom_location = sdk_path + '/Krom/macos/Krom.app'
if state.target == 'krom-macos':
shutil.copytree(krom_location, files_path + '/Krom.app')
game_files = os.listdir(files_path)
for f in game_files:
f = files_path + '/' + f
if os.path.isfile(f):
shutil.move(f, files_path + '/Krom.app/Contents/MacOS')
else:
krom_files = os.listdir(krom_location)
for f in krom_files:
f = krom_location + '/' + f
if os.path.isfile(f):
shutil.copy(f, files_path)
if state.target == 'krom-windows':
os.rename(files_path + '/Krom.exe', files_path + '/' + arm.utils.safestr(wrd.arm_project_name) + '.exe')
elif state.target == 'krom-linux':
os.rename(files_path + '/Krom', files_path + '/' + arm.utils.safestr(wrd.arm_project_name))
else:
os.rename(files_path + '/Krom.app', files_path + '/' + arm.utils.safestr(wrd.arm_project_name) + '.app')
# Rename
ext = state.target.split('-')[-1] # krom-windows
new_files_path = files_path + '-' + ext
os.rename(files_path, new_files_path)
files_path = new_files_path
if target_name == 'html5':
print('Exported HTML5 package to ' + files_path)
elif target_name == 'ios' or target_name == 'osx': # TODO: to macos
print('Exported XCode project to ' + files_path + '-build')
elif target_name == 'windows' or target_name == 'windowsapp':
print('Exported Visual Studio 2017 project to ' + files_path + '-build')
elif target_name == 'android-native':
print('Exported Android Studio project to ' + files_path + '-build/' + arm.utils.safestr(wrd.arm_project_name))
elif target_name == 'krom':
print('Exported Krom package to ' + files_path)
else:
print('Exported makefiles to ' + files_path + '-build')
def clean_project():
os.chdir(arm.utils.get_fp())
wrd = bpy.data.worlds['Arm']
# Remove build and compiled data
if os.path.isdir(arm.utils.build_dir()):
shutil.rmtree(arm.utils.build_dir(), onerror=remove_readonly)
if os.path.isdir(arm.utils.get_fp() + '/build'): # Kode Studio build dir
shutil.rmtree(arm.utils.get_fp() + '/build', onerror=remove_readonly)
# Remove compiled nodes
nodes_path = 'Sources/' + arm.utils.safestr(wrd.arm_project_package).replace('.', '/') + '/node/'
if os.path.isdir(nodes_path):
shutil.rmtree(nodes_path, onerror=remove_readonly)
# Remove khafile/korefile/Main.hx
if os.path.isfile('khafile.js'):
os.remove('khafile.js')
if os.path.isfile('korefile.js'):
os.remove('korefile.js')
if os.path.isfile('Sources/Main.hx'):
os.remove('Sources/Main.hx')
# To recache signatures for batched materials
for mat in bpy.data.materials:
mat.signature = ''
mat.is_cached = False
print('Project cleaned')
def get_render_result():
play_project(False, is_render=True)
def get_render_anim_result():
if bpy.context.scene != None:
print('Capturing animation frames into ' + bpy.context.scene.render.filepath)
play_project(False, is_render=True, is_render_anim=True)
|
tmqa34.py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
get_ipython().run_line_magic('autosave', '180')
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import requests
import time
from datetime import datetime
import itertools as it
import re
#import numpy
from copy import copy
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
#from pprint import pprint
import time
import json
import os
import networkx as nx
from math import sqrt
import spacy
from hdt import HDTDocument
import multiprocessing as mp
import torch
import random
from transformers import BertTokenizer, BertModel, BertForMaskedLM, GPT2Tokenizer, GPT2LMHeadModel
os.environ['CUDA_VISIBLE_DEVICES'] = "0,1"
from deepcorrect import DeepCorrect
corrector = DeepCorrect('data/deep_punct/deeppunct_params_en', 'data/deep_punct/deeppunct_checkpoint_wikipedia')
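# Note: corrector.correct(text) is assumed to return a list of dicts whose first element
# holds the repunctuated text under the "sequence" key (see its use in get_nlp below).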
# In[3]:
print("Hi! My PID is",os.getpid())
# In[4]:
hdt_wd = HDTDocument("data/kb/wikidata2018_09_11.hdt")
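# hdt_wd.search_triples(subject, predicate, object) returns an (iterator, cardinality) pair;
# empty strings act as wildcards (this is how the statement-fetching helpers below use it).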
# In[5]:
# Load pre-trained language models and tokenizers
#https://github.com/huggingface/transformers/blob/master/docs/source/pretrained_models.rst
bert_modelpath = "bert-large-uncased"
bert_model = BertForMaskedLM.from_pretrained(bert_modelpath)
bert_tokenizer = BertTokenizer.from_pretrained(bert_modelpath)
gpt2_modelpath = "gpt2-xl"
gpt2_tokenizer = GPT2Tokenizer.from_pretrained(gpt2_modelpath)
gpt2_model = GPT2LMHeadModel.from_pretrained(gpt2_modelpath)
# In[6]:
#nlp = spacy.load("en_core_web_lg")
nlp = spacy.load("/data/users/romain.claret/tm/wiki-kb-linked-entities/nlp_custom_6")
#print(nlp.pipeline)
# In[7]:
# load settings
with open( "settings-graphqa.json", "r") as settings_data:
settings = json.load(settings_data)
use_cache = settings['use_cache']
save_cache = settings['save_cache']
cache_path = settings['cache_path']
#cache_path
# In[8]:
save_cache = True
def save_cache_data(save_cache=save_cache):
if save_cache:
with open(os.path.join(cache_path,'wd_local_statements_dict.json'), 'wb') as outfile:
outfile.write(json.dumps(wd_local_statements_dict, separators=(',',':')).encode('utf8'))
with open(os.path.join(cache_path,'wd_labels_dict.json'), 'wb') as outfile:
outfile.write(json.dumps(wd_labels_dict, separators=(',',':')).encode('utf8'))
with open(os.path.join(cache_path,'wd_local_word_ids_dict.json'), 'wb') as outfile:
outfile.write(json.dumps(wd_local_word_ids_dict, separators=(',',':')).encode('utf8'))
with open(os.path.join(cache_path,'wd_online_word_ids_dict.json'), 'wb') as outfile:
outfile.write(json.dumps(wd_online_word_ids_dict, separators=(',',':')).encode('utf8'))
with open(os.path.join(cache_path,'wd_local_predicate_ids_dict.json'), 'wb') as outfile:
outfile.write(json.dumps(wd_local_predicate_ids_dict, separators=(',',':')).encode('utf8'))
with open(os.path.join(cache_path,'wd_online_predicate_ids_dict.json'), 'wb') as outfile:
outfile.write(json.dumps(wd_online_predicate_ids_dict, separators=(',',':')).encode('utf8'))
with open(os.path.join(cache_path,'word_similarities_dict.json'), 'wb') as outfile:
outfile.write(json.dumps(word_similarities_dict, separators=(',',':')).encode('utf8'))
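# Example call once the cache dicts below are loaded:
#save_cache_data(save_cache=True)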
# In[9]:
# Load statements cache
use_cache = True
def load_cache_data(use_cache=False):
if use_cache:
path_wd_local_statements_dict = "wd_local_statements_dict.json"
path_wd_labels_dict = 'wd_labels_dict.json'
path_wd_local_word_ids_dict = 'wd_local_word_ids_dict.json'
path_wd_online_word_ids_dict = 'wd_online_word_ids_dict.json'
path_wd_local_predicate_ids_dict = 'wd_local_predicate_ids_dict.json'
path_wd_online_predicate_ids_dict = 'wd_online_predicate_ids_dict.json'
path_word_similarities_dict = 'word_similarities_dict.json'
else:
path_wd_local_statements_dict = "wd_local_statements_dict_empty.json"
path_wd_labels_dict = 'wd_labels_dict_empty.json'
path_wd_local_word_ids_dict = 'wd_local_word_ids_dict_empty.json'
path_wd_online_word_ids_dict = 'wd_online_word_ids_dict_empty.json'
path_wd_local_predicate_ids_dict = 'wd_local_predicate_ids_dict_empty.json'
path_wd_online_predicate_ids_dict = 'wd_online_predicate_ids_dict_empty.json'
path_word_similarities_dict = 'word_similarities_dict_empty.json'
with open(os.path.join(cache_path,path_wd_local_statements_dict), "rb") as data:
wd_local_statements_dict = json.load(data)
with open(os.path.join(cache_path,path_wd_labels_dict), "rb") as data:
wd_labels_dict = json.load(data)
with open(os.path.join(cache_path,path_wd_local_word_ids_dict), "rb") as data:
wd_local_word_ids_dict = json.load(data)
with open(os.path.join(cache_path,path_wd_online_word_ids_dict), "rb") as data:
wd_online_word_ids_dict = json.load(data)
with open(os.path.join(cache_path,path_wd_local_predicate_ids_dict), "rb") as data:
wd_local_predicate_ids_dict = json.load(data)
with open(os.path.join(cache_path,path_wd_online_predicate_ids_dict), "rb") as data:
wd_online_predicate_ids_dict = json.load(data)
with open(os.path.join(cache_path,path_word_similarities_dict), "rb") as data:
word_similarities_dict = json.load(data)
return (wd_local_statements_dict, wd_labels_dict,
wd_local_word_ids_dict, wd_online_word_ids_dict,
wd_local_predicate_ids_dict, wd_online_predicate_ids_dict,
word_similarities_dict)
(wd_local_statements_dict, wd_labels_dict,
wd_local_word_ids_dict, wd_online_word_ids_dict,
wd_local_predicate_ids_dict, wd_online_predicate_ids_dict,
word_similarities_dict) = load_cache_data(use_cache=True)
#print("wd_local_statements_dict",len(wd_local_statements_dict))
#print("wd_labels_dict",len(wd_labels_dict))
#print("wd_local_word_ids_dict",len(wd_local_word_ids_dict))
#print("wd_online_word_ids_dict",len(wd_online_word_ids_dict))
#print("wd_local_predicate_ids_dict",len(wd_local_predicate_ids_dict))
#print("wd_online_predicate_ids_dict",len(wd_online_predicate_ids_dict))
#print("word_similarities_dict",len(word_similarities_dict))
# In[10]:
def get_kb_ents(text):
#doc = nlp_kb(text)
doc = nlp(text)
#for ent in doc.ents:
# print(" ".join(["ent", ent.text, ent.label_, ent.kb_id_]))
return doc.ents
#ent_text_test = (
# "In The Hitchhiker's Guide to the Galaxy, written by Douglas Adams, "
# "Douglas reminds us to always bring our towel, even in China or Brazil. "
# "The main character in Doug's novel is the man Arthur Dent, "
# "but Dougledydoug doesn't write about George Washington or Homer Simpson."
#)
#
#en_text_test_2 = ("Which actor voiced the Unicorn in The Last Unicorn?")
#
#print([ent.kb_id_ for ent in get_kb_ents(ent_text_test)])
#[ent.kb_id_ for ent in get_kb_ents(en_text_test_2)]
# In[11]:
def get_nlp(sentence, autocorrect=False, banning_str=False):
sentence = sentence.replace("’", "\'")
nlp_sentence = nlp(sentence)
nlp_sentence_list = list(nlp_sentence)
meaningful_punct = []
for i_t, t in enumerate(nlp_sentence_list):
#print(t,t.pos_, t.lemma_)
if t.lemma_ == "year":
nlp_sentence_list[i_t] = "date"
elif t.text == "\'s" or t.text == "s":
if t.lemma_ == "be" or t.lemma_ == "s":
nlp_sentence_list[i_t] = "is"
else: nlp_sentence_list[i_t] = ""
elif t.text == "\'ve" or t.text == "ve":
if t.lemma_ == "have":
nlp_sentence_list[i_t] = "have"
else: nlp_sentence_list[i_t] = ""
elif t.text == "\'re" or t.text == "re":
if t.lemma_ == "be":
nlp_sentence_list[i_t] = "are"
else: nlp_sentence_list[i_t] = ""
elif t.text == "\'ll" or t.text == "ll":
if t.lemma_ == "will":
nlp_sentence_list[i_t] = "will"
else: nlp_sentence_list[i_t] = ""
elif t.text == "\'d" or t.text == "d":
if t.lemma_ == "have":
nlp_sentence_list[i_t] = "had"
elif t.lemma_ == "would":
nlp_sentence_list[i_t] = "would"
else: nlp_sentence_list[i_t] = ""
elif t.is_space:
nlp_sentence_list[i_t] = ""
elif t.pos_ == "PUNCT":
if t.text.count(".") > 2:
meaningful_punct.append((i_t,"..."))
nlp_sentence_list[i_t] = "..."
else:
nlp_sentence_list[i_t] = ""
else: nlp_sentence_list[i_t] = nlp_sentence_list[i_t].text
nlp_sentence_list = [w for w in nlp_sentence_list if w]
#print("nlp_sentence_list",nlp_sentence_list)
if autocorrect:
nlp_sentence = " ".join(nlp_sentence_list)
nlp_sentence = (nlp_sentence.replace("’", "\'").replace("€", "euro").replace("ç", "c")
.replace("à", "a").replace("é","e").replace("ä","a").replace("ö","o")
.replace("ü","u").replace("è","e").replace("¨","").replace("ê","e")
.replace("â","a").replace("ô","o").replace("î","i").replace("û","u")
.replace("_"," ").replace("°","degree").replace("§","section")
.replace("š","s").replace("Š","S").replace("ć","c").replace("Ç", "C")
.replace("À", "A").replace("É","E").replace("Ä","A").replace("Ö","O")
.replace("Ü","U").replace("È","E").replace("Ê","E").replace("Ë","E")
.replace("Â","A").replace("Ô","O").replace("Î","I").replace("Û","U")
.replace("á","a").replace("Á","Á").replace("ó","o").replace("Ó","O")
.replace("ú","u").replace("Ú","U").replace("í","i").replace("Í","I")
.replace("–","-").replace("×","x").replace("“","\"").replace("ř","r")
.replace("ø","o").replace("ı","i").replace("ş","s").replace("Á","A")
.replace("Ō","O").replace("ã","a").replace("ū","u").replace("ō","o")
.replace("ñ","n").replace("Ł","L").replace("ł","l").replace("Ñ","N")
.replace("Ō","O").replace("Ā","A").replace("ē","e").replace("ǟ","a")
.replace("ȱ","o").replace("ō","o").replace("ȭ","o").replace("ī","i")
.replace("ū","u").replace("ȯ","o").replace("ä","a").replace("õ","o")
.replace("Ā","A").replace("ū","u").replace("ī","i").replace("ē","e")
.replace("ō","o").replace("Ā","A").replace("ā","a").replace("Ǟ","A")
.replace("ǟ","a").replace("Ḇ","B").replace("ḇ","b").replace("C̄","C")
.replace("c̄","c").replace("Ḏ","D").replace("ḏ","d").replace("ḕ","e")
.replace("Ē","E").replace("ē","e").replace("Ḕ","E").replace("Ḗ","E")
.replace("ḗ","e").replace("Ḡ","G").replace("ḡ","g").replace("ẖ","h")
.replace("Ī","ī").replace("Ḹ","L").replace("ḹ","l").replace("Ḻ","L")
.replace("ḻ","l").replace("Ṉ","N").replace("ṉ","n").replace("Ȫ","O")
.replace("ȫ","o").replace("Ṑ","O").replace("ṑ","o").replace("ß","ss")
.replace("Ṓ","O").replace("ṓ","o").replace("Ṝ","R").replace("ṝ","r")
.replace("Ṟ","R").replace("ṟ","r").replace("Ṯ","T").replace("ṯ","t")
.replace("Ū","U").replace("ū","u").replace("Ǘ","U").replace("ǘ","u")
.replace("Ǖ","U").replace("ǖ","u").replace("Ṻ","U").replace("ṻ","u")
.replace("Ȳ","Y").replace("ȳ","y").replace("ẕ","z").replace("Ẕ","Z")
.replace("Ǣ","AE").replace("ǣ","ae").replace("ė","e").replace("å","a")
.replace("æ","ae").replace("Æ","AE").replace("ą","a").replace("ț","t")
.replace("ï","i").replace("Ț","T").replace("İ","I").replace("ʻ","\'")
.replace("ń","n").replace("Ń","N").replace("Č","C").replace("ø","o")
.replace("č","c").replace("ž","z").replace("Ž","Z").replace("Ø","O")
.replace("ễ","e").replace("Ê","E").replace("ă","a").replace("Ă","A")
.replace("ệ","e").replace("Ş","S").replace("ş","s").replace("~"," ")
.replace("œ","oe").replace("Œ","OE").replace("ě","e").replace("Ě","E")
.replace("đ","d").replace("Đ","D").replace("Я","R").replace("я","r")
.replace("ý","y").replace("Ý","Y").replace("Ż","Z").replace("ż","z")
.replace("ș","s").replace("¡","i").replace("´","\'").replace("Ș","S")
.replace("ò","o").replace("Ò","O").replace("ë","e")
)
if banning_str:
for ban in banning_str:
nlp_sentence = nlp_sentence.replace(ban[0],ban[1])
nlp_sentence = corrector.correct(nlp_sentence)
nlp_sentence = nlp_sentence[0]["sequence"]
nlp_sentence = nlp(nlp_sentence)
nlp_sentence_list = list(nlp_sentence)
for i_t, t in enumerate(nlp_sentence_list):
if t.pos_ == "PUNCT":
if i_t in [mpunct[0] for mpunct in meaningful_punct]:
for mpunct in meaningful_punct:
if i_t == mpunct[0]:
nlp_sentence_list[mpunct[0]] = mpunct[1]
else: nlp_sentence_list[i_t] = ''
else:
nlp_sentence_list[i_t] = nlp_sentence_list[i_t].text
for mpunct in meaningful_punct:
if mpunct[0] < len(nlp_sentence_list):
if nlp_sentence_list[mpunct[0]] != mpunct[1]:
nlp_sentence_list.insert(mpunct[0], mpunct[1])
return nlp(" ".join(nlp_sentence_list).replace(" ", " ").replace(". &",".").replace("/",""))
#get_nlp("Which genre of album is harder.....faster?", autocorrect=True)
#get_nlp("Which genre of album is Harder ... Faster", autocorrect=True)
#get_nlp("Which home is an example of italianate architecture?", autocorrect=True)
#get_nlp("Your mom's father, were nice in the Years.!?\'\":`’^!$£€\(\)ç*+%&/\\\{\};,àéäöüè¨êâôîû~-_<>°§...@.....", autocorrect=True)
#get_nlp("of what nationality is ken mcgoogan", autocorrect=True)
#get_nlp("you're fun", autocorrect=True)
#get_nlp("where's the fun", autocorrect=True)
#get_nlp("whats the name of the organization that was founded by frei otto", True)
#get_nlp("Hurry! We’re late!",True)
#get_nlp("Who was an influential figure for miško Šuvaković",True)
#get_nlp("what is the second level division of the division crixás do tocantins",True)
#get_nlp("what is the second level division of the division crixás do tocantins",True)
#get_nlp("2×4",autocorrect=True,banning_str=[["×","x"]])
#get_nlp("what types of music is p.a.r.c.e.",autocorrect=True)
# In[12]:
def is_wd_entity(to_check):
pattern = re.compile('^Q[0-9]*$')
if pattern.match(to_check.strip()): return True
else: return False
def is_wd_predicate(to_check):
pattern = re.compile('^P[0-9]*$')
if pattern.match(to_check.strip()): return True
else: return False
def is_valide_wd_id(to_check):
if is_wd_entity(to_check) or is_wd_predicate(to_check): return True
else: return False
#print(is_wd_entity("Q155"))
# In[13]:
# TODO redo the functions and optimize
def is_entity_or_literal(to_check):
if is_wd_entity(to_check.strip()):
return True
pattern = re.compile('^[A-Za-z0-9]*$')
if len(to_check) == 32 and pattern.match(to_check.strip()):
return False
return True
# return if the given string is a literal or a date
def is_literal_or_date(to_check):
return not('www.wikidata.org' in to_check)
# return if the given string describes a year in the format YYYY
def is_year(year):
pattern = re.compile('^[0-9][0-9][0-9][0-9]$')
if not(pattern.match(year.strip())):
return False
else:
return True
# return if the given string is a date
def is_date(date):
pattern = re.compile('^[0-9]+ [A-Za-z]+ [0-9][0-9][0-9][0-9]$')
if not(pattern.match(date.strip())):
return False
else:
return True
# return if the given string is a timestamp
def is_timestamp(timestamp):
pattern = re.compile('^[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]T00:00:00Z')
if not(pattern.match(timestamp.strip())):
return False
else:
return True
# convert the given month to a number
def convert_month_to_number(month):
return{
"january" : "01",
"february" : "02",
"march" : "03",
"april" : "04",
"may" : "05",
"june" : "06",
"july" : "07",
"august" : "08",
"september" : "09",
"october" : "10",
"november" : "11",
"december" : "12"
}[month.lower()]
# convert a date from the wikidata frontend style to timestamp style
def convert_date_to_timestamp (date):
sdate = date.split(" ")
# add the leading zero
if (len(sdate[0]) < 2):
sdate[0] = "0" + sdate[0]
return sdate[2] + '-' + convert_month_to_number(sdate[1]) + '-' + sdate[0] + 'T00:00:00Z'
# convert a year to timestamp style
def convert_year_to_timestamp(year):
return year + '-01-01T00:00:00Z'
# get the wikidata id of a wikidata url
def wikidata_url_to_wikidata_id(url):
if not url:
return False
if "XMLSchema#dateTime" in url or "XMLSchema#decimal" in url:
date = url.split("\"", 2)[1]
date = date.replace("+", "")
return date
if(is_literal_or_date(url)):
if is_year(url):
return convert_year_to_timestamp(url)
if is_date(url):
return convert_date_to_timestamp(url)
else:
url = url.replace("\"", "")
return url
else:
url_array = url.split('/')
# the wikidata id is always in the last component of the URL
return url_array[len(url_array)-1]
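# Examples:
#print(convert_month_to_number("March"))  # -> "03"
#print(convert_date_to_timestamp("9 May 1945"))  # -> "1945-05-09T00:00:00Z"
#print(wikidata_url_to_wikidata_id("http://www.wikidata.org/entity/Q42"))  # -> "Q42"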
# fetch all statements where the given qualifier statement occurs as subject
def get_all_statements_with_qualifier_as_subject(qualifier):
statements = []
triples, cardinality = hdt_wd.search_triples(qualifier, "", "")
for triple in triples:
sub, pre, obj = triple
# only consider triples with a wikidata-predicate
if pre.startswith("http://www.wikidata.org/"):
statements.append({'entity': sub, 'predicate': pre, 'object': obj})
return statements
# fetch the statement where the given qualifier statement occurs as object
def get_statement_with_qualifier_as_object(qualifier):
triples, cardinality = hdt_wd.search_triples("", "", qualifier)
for triple in triples:
sub, pre, obj = triple
# only consider triples with a wikidata-predicate
if pre.startswith("http://www.wikidata.org/") and sub.startswith("http://www.wikidata.org/entity/Q"):
return (sub, pre, obj)
return False
# returns all statements that involve the given entity
def get_all_statements_of_entity(entity_id):
# check entity pattern
if not is_wd_entity(entity_id.strip()):
return False
if wd_local_statements_dict.get(entity_id) != None:
#print("saved statement")
return wd_local_statements_dict[entity_id]
entity = "http://www.wikidata.org/entity/"+entity_id
statements = []
# entity as subject
triples_sub, cardinality_sub = hdt_wd.search_triples(entity, "", "")
# entity as object
triples_obj, cardinality_obj = hdt_wd.search_triples("", "", entity)
if cardinality_sub + cardinality_obj > 5000:
wd_local_statements_dict[entity_id] = []
return []
# iterate through all triples in which the entity occurs as the subject
for triple in triples_sub:
sub, pre, obj = triple
# only consider triples with a wikidata-predicate or if it is an identifier predicate
if not pre.startswith("http://www.wikidata.org/"):# or (wikidata_url_to_wikidata_id(pre) in identifier_predicates):
continue
# object is statement
if obj.startswith("http://www.wikidata.org/entity/statement/"):
qualifier_statements = get_all_statements_with_qualifier_as_subject(obj)
qualifiers = []
for qualifier_statement in qualifier_statements:
if qualifier_statement['predicate'] == "http://www.wikidata.org/prop/statement/" + wikidata_url_to_wikidata_id(pre):
obj = qualifier_statement['object']
elif is_entity_or_literal(wikidata_url_to_wikidata_id(qualifier_statement['object'])):
qualifiers.append({
"qualifier_predicate":{
"id": wikidata_url_to_wikidata_id(qualifier_statement['predicate'])
},
"qualifier_object":{
"id": wikidata_url_to_wikidata_id(qualifier_statement['object'])
}})
statements.append({'entity': {'id': wikidata_url_to_wikidata_id(sub)}, 'predicate': {'id': wikidata_url_to_wikidata_id(pre)}, 'object': {'id': wikidata_url_to_wikidata_id(obj)}, 'qualifiers': qualifiers})
else:
statements.append({'entity': {'id': wikidata_url_to_wikidata_id(sub)}, 'predicate': {'id': wikidata_url_to_wikidata_id(pre)}, 'object': {'id': wikidata_url_to_wikidata_id(obj)}, 'qualifiers': []})
# iterate through all triples in which the entity occurs as the object
for triple in triples_obj:
sub, pre, obj = triple
# only consider triples with an entity as subject and a wikidata-predicate or if it is an identifier predicate
if not sub.startswith("http://www.wikidata.org/entity/Q"):# or not pre.startswith("http://www.wikidata.org/") or wikidata_url_to_wikidata_id(pre) in identifier_predicates:
continue
if sub.startswith("http://www.wikidata.org/entity/statement/"):
statements_with_qualifier_as_object = get_statement_with_qualifier_as_object(sub)
# if no statement was found continue
if not statements_with_qualifier_as_object:
continue
main_sub, main_pred, main_obj = statements_with_qualifier_as_object
qualifier_statements = get_all_statements_with_qualifier_as_subject(sub)
qualifiers = []
for qualifier_statement in qualifier_statements:
if wikidata_url_to_wikidata_id(qualifier_statement['predicate']) == wikidata_url_to_wikidata_id(main_pred):
main_obj = qualifier_statement['object']
elif is_entity_or_literal(wikidata_url_to_wikidata_id(qualifier_statement['object'])):
qualifiers.append({
"qualifier_predicate":{"id": wikidata_url_to_wikidata_id(qualifier_statement['predicate'])},
"qualifier_object":{"id": wikidata_url_to_wikidata_id(qualifier_statement['object'])}
})
statements.append({
'entity': {'id': wikidata_url_to_wikidata_id(main_sub)},
'predicate': {'id': wikidata_url_to_wikidata_id(main_pred)},
'object': {'id': wikidata_url_to_wikidata_id(main_obj)},
'qualifiers': qualifiers
})
else:
statements.append({'entity': {'id': wikidata_url_to_wikidata_id(sub)}, 'predicate': {'id': wikidata_url_to_wikidata_id(pre)}, 'object': {'id': wikidata_url_to_wikidata_id(obj)}, 'qualifiers': []})
# cache the data
wd_local_statements_dict[entity_id] = statements
return statements
#print(len(get_all_statements_of_entity("Q267721")))
#for s in get_all_statements_of_entity("Q267721"):
# print(s)
#save_cache_data(save_cache=save_cache)
# In[14]:
def get_wd_ids_online(name, is_predicate=False, top_k=3, sim_threshold=0.5):
name = name.split('(')[0]
if is_predicate and wd_online_predicate_ids_dict.get(name) != None and use_cache and len(wd_online_predicate_ids_dict)>0:
res_ids = wd_online_predicate_ids_dict[name][:top_k]
to_return = []
for res_id in res_ids:
if get_nlp(get_wd_label(res_id)).similarity(get_nlp(name)) >= sim_threshold:
to_return.append(res_id)
return to_return #wd_online_predicate_ids_dict[name][:top_k]
elif not is_predicate and wd_online_word_ids_dict.get(name) != None and use_cache and len(wd_online_word_ids_dict)>0:
#print("saved word online")
res_ids = wd_online_word_ids_dict[name][:top_k]
to_return = []
for res_id in res_ids:
if get_nlp(get_wd_label(res_id)).similarity(get_nlp(name)) >= sim_threshold:
to_return.append(res_id)
return to_return #wd_online_word_ids_dict[name][:top_k]
request_successfull = False
entity_ids = ""
while not request_successfull:
try:
if is_predicate:
entity_ids = requests.get('https://www.wikidata.org/w/api.php?action=wbsearchentities&format=json&language=en&type=property&limit=' + str(top_k) + '&search='+name).json()
else:
entity_ids = requests.get('https://www.wikidata.org/w/api.php?action=wbsearchentities&format=json&language=en&limit=' + str(top_k) + '&search='+name).json()
request_successfull = True
except Exception as e:
print("ERROR:",e)
time.sleep(5)
results = entity_ids.get("search")
if not results:
if is_predicate: wd_online_predicate_ids_dict[name] = []
else: wd_online_word_ids_dict[name] = []
return []
if not len(results):
if is_predicate: wd_online_predicate_ids_dict[name] = []
else: wd_online_word_ids_dict[name] = []
return []
res = []
for result in results:
res_id = result['id']
#print(res_id,get_nlp(get_wd_label(res_id)).similarity(get_nlp(name)))
if get_nlp(get_wd_label(res_id)).similarity(get_nlp(name)) >= sim_threshold:
res.append(res_id)
if is_predicate: wd_online_predicate_ids_dict[name] = res
else: wd_online_word_ids_dict[name] = res
if res:
return res[:top_k]
else:
return []
#print(get_wd_ids_online("did", is_predicate=False, top_k=3))
#print(get_wd_ids_online("voiced", is_predicate=True, top_k=3))
# In[15]:
# very computational
def get_most_similar(word, top_k=3):
BANNED_WORDS = ["benteen"]
if word in BANNED_WORDS:
return []
print("behold: get_most_similar started with:", word)
word_text = str(word.lower())
if word_similarities_dict.get(word_text) != None and use_cache and len(word_similarities_dict)>0:
return word_similarities_dict[word_text][:top_k]
word = nlp.vocab[word_text]
queries = [w for w in word.vocab if w.is_lower == word.is_lower and w.prob >= -15]
if len(queries) > 32994:
return []
by_similarity = sorted(queries, key=lambda w: word.similarity(w), reverse=True)
word_similarities = [(w.text.lower(),float(w.similarity(word))) for w in by_similarity[:10] if w.lower_ != word.lower_]
word_similarities_dict[word_text] = word_similarities
save_cache_data(save_cache=save_cache)
return word_similarities[:top_k]
#print(get_most_similar("dog", top_k=3))
#save_cache_data(save_cache=save_cache)
# In[122]:
def get_wd_ids(word, is_predicate=False, top_k=3, limit=6, online=False, sim_threshold=0.5):
#if is_predicate and wd_local_predicate_ids_dict.get(word) != None and use_cache and len(wd_local_predicate_ids_dict)>0:
# #print("saved predicate local")
# res_ids = wd_local_predicate_ids_dict[word][:top_k]
# to_return = []
# for res_id in res_ids:
# if get_nlp(get_wd_label(res_id)).similarity(get_nlp(word)) >= sim_threshold:
# to_return.append(res_id)
# return to_return #wd_local_predicate_ids_dict[word][:top_k]
#
#elif not is_predicate and wd_local_word_ids_dict.get(word) != None and use_cache and len(wd_local_word_ids_dict)>0:
# #print("saved word local")
# res_ids = wd_local_word_ids_dict[word][:top_k]
# to_return = []
# for res_id in res_ids:
# if get_nlp(get_wd_label(res_id)).similarity(get_nlp(word)) >= sim_threshold:
# to_return.append(res_id)
# return to_return #wd_local_word_ids_dict[word][:top_k]
language = "en"
word_formated = str("\""+word+"\""+"@"+language)
to_remove = len("http://www.wikidata.org/entity/")
t_name, card_name = hdt_wd.search_triples("", "http://schema.org/name", word_formated, limit=top_k)
#print("names cardinality of \"" + word+"\": %i" % card_name)
t_alt, card_alt = hdt_wd.search_triples("", 'http://www.w3.org/2004/02/skos/core#altLabel', word_formated, limit=top_k)
#print("alternative names cardinality of \"" + word+"\": %i" % card_alt)
results = list(set(
[t[0][to_remove:] for t in t_name if is_valide_wd_id(t[0][to_remove:])] +
[t[0][to_remove:] for t in t_alt if is_valide_wd_id(t[0][to_remove:])]
))
res = []
for result in results:
#print(result,get_nlp(get_wd_label(result)).similarity(get_nlp(word)))
if get_nlp(get_wd_label(result)).similarity(get_nlp(word)) >= sim_threshold:
res.append(result)
if is_predicate: res = [r for r in res if is_wd_predicate(r)]
# cache the data
if is_predicate: wd_local_predicate_ids_dict[word] = res
else: wd_local_word_ids_dict[word] = res
#print("res",res)
#print("limit",limit)
if limit<=0:
#print("if limit<=0",top_k)
return res[:top_k]
else:
#print("else limit<=0",limit)
if limit-1>=0: return res[:limit-1]
else: return res[:limit]
#print(get_wd_ids("did", is_predicate=False, top_k=1))
#get_wd_ids("The Last Unicorn", is_predicate=False,top_k=0, limit=10)
#print(get_wd_ids("wife", is_predicate=False , top_k=0, limit=0))
#print(get_wd_ids("voiced", is_predicate=True , top_k=0, limit=0))
# In[123]:
def get_wd_label(from_id, language="en"):
#print("from_id",from_id)
if is_valide_wd_id(from_id):
if wd_labels_dict.get(from_id) != None and use_cache and len(wd_labels_dict)>0:
#print("saved label local")
return wd_labels_dict[from_id]
id_url = "http://www.wikidata.org/entity/"+from_id
t_name, card_name = hdt_wd.search_triples(id_url, "http://schema.org/name", "")
name = [t[2].split('\"@'+language)[0].replace("\"", "") for t in t_name if "@"+language in t[2]]
#name = [t[2].split('@en')[0] for t in t_name if "@"+language in t[2]]
result = name[0] if name else ''
wd_labels_dict[from_id] = result #caching
return result
else:
return from_id
#print(get_wd_label("P725"))
#get_wd_label("Q20789322")
#get_wd_label("Q267721")
# In[124]:
# Building colors from graph
def get_color(node_type):
if node_type == "entity": return "violet"#"cornflowerblue"
elif node_type == "predicate": return "yellow"
else: return "red"
# Building labels for graph
def get_elements_from_graph(graph):
node_names = nx.get_node_attributes(graph,"name")
node_types = nx.get_node_attributes(graph,"type")
colors = [get_color(node_types[n]) for n in node_names]
return node_names, colors
# Plotting the graph
def plot_graph(graph, name, title="Graph"):
fig = plt.figure(figsize=(14,14))
ax = plt.subplot(111)
ax.set_title(str("answer: "+title), fontsize=10)
#pos = nx.spring_layout(graph)
labels, colors = get_elements_from_graph(graph)
nx.draw(graph, node_size=30, node_color=colors, font_size=10, font_weight='bold', with_labels=True, labels=labels)
plt.tight_layout()
plt.savefig("tmqa1_graphs_imgs/"+str(name)+".png", format="PNG", dpi = 300)
plt.show()
#plot_graph(graph, "file_name_graph", "Graph_title")
# In[125]:
def make_statements_graph_worker(graph, predicate_nodes, turn, indexing_predicates, BANNED_WD_IDS, BANNED_WD_PRED_IDS, BANNED_WD_KEYWORDS, BANNED_WD_PRED_KEYWORDS, in_mp_queue, out_mp_queue, predicate_nodes_lock, node_weight, qa):
#for statement in statements:
sentinel = None
for statement in iter(in_mp_queue.get, sentinel):
#print("statement",statement)
#if (statement['entity']['id'][0] != "Q"
# or statement['entity']['id'] in BANNED_WD_IDS
# or statement['predicate']['id'][0] != "P"
# or statement['predicate']['id'] in BANNED_WD_PRED_IDS
# or statement['object']['id'][0] != "Q"
# or statement['object']['id'] in BANNED_WD_IDS):
# continue
if (
statement['entity']['id'] in BANNED_WD_IDS
or statement['predicate']['id'][0] != "P"
or statement['predicate']['id'] in BANNED_WD_PRED_IDS
or statement['object']['id'] in BANNED_WD_IDS
):
continue
continue_flag = False
for key in BANNED_WD_PRED_KEYWORDS:
if (get_wd_label(statement['predicate']['id']).find(key) != -1): continue_flag = True
for key in BANNED_WD_KEYWORDS:
if (get_wd_label(statement['entity']['id']).find(key) != -1): continue_flag = True
if (get_wd_label(statement['object']['id']).find(key) != -1): continue_flag = True
if continue_flag: continue
#print(statement)
if not statement['entity']['id'] in graph:
graph.add_node(statement['entity']['id'], name=get_wd_label(statement['entity']['id']), type='entity', turn=turn, weight=node_weight, qa=qa)
if not statement['object']['id'] in graph:
graph.add_node(statement['object']['id'], name=get_wd_label(statement['object']['id']), type='entity', turn=turn, weight=node_weight, qa=qa)
with predicate_nodes_lock:
# increment index of predicate or initialize it to 1
if not statement['predicate']['id'] in predicate_nodes or not indexing_predicates:
predicate_nodes_index = 1
predicate_nodes[statement['predicate']['id']] = 1
else:
predicate_nodes[statement['predicate']['id']] += 1
predicate_nodes_index = predicate_nodes[statement['predicate']['id']]
# add the predicate node
predicate_node_id = (statement['predicate']['id'])
if indexing_predicates: predicate_node_id += "-" + str(predicate_nodes_index)
graph.add_node(predicate_node_id, name=get_wd_label(statement['predicate']['id']), type='predicate', turn=turn, weight=node_weight)
# add the two edges (entity->predicate->object)
#statement['entity']['id'] in BANNED_WD_IDS
#statement['object']['id'] in BANNED_WD_IDS
#statement['predicate']['id'] in BANNED_WD_PRED_IDS
#if (statement['predicate']['id'] in BANNED_WD_PRED_IDS): break
graph.add_edge(statement['entity']['id'], predicate_node_id)
graph.add_edge(predicate_node_id, statement['object']['id'])
out_mp_queue.put(graph)
# In[20]:
# TODO: handle special literals? which one
def make_statements_graph(statements, indexing_predicates=True, potential_predicates=False, cores=mp.cpu_count(), context_graph=False, node_weight=1, qa=False, max_deepness=3):
BANNED_WD_IDS = [
"Q4167410","Q66087861","Q65932995","Q21281405","Q17442446","Q41770487","Q29548341",
"Q29547399","Q25670","Q21286738"
]
BANNED_WD_PRED_IDS = [
"P1687","P7087","P1889","P646", "P227", "P1256", "P1257", "P1258", "P1260", "P301",
"P18","P1266","P487","P1970","P2529", "P4390", "P4342", "P4213", "P487", "P2624",
"P4953", "P2241", "P345","P703", "P2163", "P18", "P436", "P227", "P646", "P2581",
"P1006", "P244", "P214", "P1051", "P1296","P461", "P2959", "P1657", "P3834","P243",
"P3306","P6932","P356","P1630","P3303","P1921","P1793","P1628","P1184","P1662","P2704",
"P4793","P1921","P2302","P6562","P6127","P4342","P6145","P5786","P5099","P4947","P5032",
"P4933","P4632","P4529","P4277","P4282","P3135","P4276","P3593","P2638","P3804","P3145",
"P2509","P3212","P2704","P480","P3844","P3141","P3808","P3933","P2346","P3077","P3417",
"P2529","P3302","P3143","P2334","P3129","P3138","P3107","P2603","P2631","P2508","P2465",
"P2014", "P1874", "P2518", "P1265", "P1237","P1712", "P1970","P1804","P905","P1562",
"P1258","P646","P345",'http://www.w3.org/2002/07/owl#sameAs'
]
BANNED_WD_KEYWORDS = ["_:"]
BANNED_WD_PRED_KEYWORDS = [
"ID", "ISBN","Identifier","identifier", "IDENTIFIER", "isbn", "ISSN", "issn","id","Id","iD","ISNI"
]
predicate_nodes = mp.Manager().dict()
predicate_nodes_lock = mp.Manager().Lock()
if context_graph:
latest_turn = sorted([y["turn"] for x,y in context_graph.nodes(data=True)])[-1]
turn = latest_turn+1
graph = context_graph.copy()
previous_predicates_ids = [x for x,y in context_graph.nodes(data=True) if y["type"]=="predicate"]
if previous_predicates_ids:
if previous_predicates_ids[0].find("-") != -1:
for ppi in previous_predicates_ids:
ppi_id = ppi[:ppi.find("-")]
ppi_value = ppi[ppi.find("-")+1:]
if ppi_id in predicate_nodes:
if int(ppi_value) > predicate_nodes[ppi_id]:
predicate_nodes[ppi_id] = int(ppi_value)
else:
predicate_nodes[ppi_id] = int(ppi_value)
#print("predicate_nodes from context",predicate_nodes.keys())
else:
turn=1
graph = nx.Graph()
if cores <= 0: cores = 1
out_mp_queue = mp.Queue()
in_mp_queue = mp.Queue()
sentinel = None
for statement in statements:
in_mp_queue.put(statement)
procs = [mp.Process(target = make_statements_graph_worker, args = (graph, predicate_nodes, turn, indexing_predicates, BANNED_WD_IDS, BANNED_WD_PRED_IDS, BANNED_WD_KEYWORDS, BANNED_WD_PRED_KEYWORDS, in_mp_queue, out_mp_queue, predicate_nodes_lock, node_weight, qa)) for i in range(cores)]
for proc in procs:
proc.daemon = True
proc.start()
for proc in procs:
in_mp_queue.put(sentinel)
for proc in procs:
local_g = out_mp_queue.get()
graph = nx.compose(graph,local_g)
for proc in procs:
proc.join()
if context_graph:
previous_entities_ids = [x for x,y in context_graph.nodes(data=True) if y["type"]=="entity"]
#print("previous_entities_ids",previous_entities_ids)
for n in graph.copy().nodes():
is_entity_present = [nx.has_path(graph, n, pei) for pei in previous_entities_ids if graph.has_node(pei)]
#print("is_entity_present for n",n,is_entity_present)
if not any(iep for iep in is_entity_present):
graph.remove_node(n)
else:
shortest_paths = [nx.shortest_path(graph, n, pei) for pei in previous_entities_ids if graph.has_node(pei)]
#print("shortest_paths",shortest_paths)
shortest_paths_len = [len(sp) for sp in shortest_paths]
#print("shortest_paths_len",shortest_paths_len)
if not any((spl <= max_deepness and spl>1) for spl in shortest_paths_len):
graph.remove_node(n)
#print("Any is smaller than max_deepness")
#else:
#print("None is smaller than max_deepness")
#if previous_predicates_ids
#print("1 len(graph)",len(graph))
if previous_predicates_ids:
if previous_predicates_ids[0].find("-") != -1:
previous_predicates_ids_only = [p[:p.find("-")] for p in previous_predicates_ids]
else:
previous_predicates_ids_only = previous_predicates_ids
else: previous_predicates_ids_only=[]
#print("previous_predicates_ids",previous_predicates_ids)
#print("previous_predicates_ids_only",previous_predicates_ids_only)
spo_list = [[list(graph.neighbors(p))[0],p[:p.find("-")],list(graph.neighbors(p))[1]] for p in previous_predicates_ids]
spo_list_tagged = [[list(graph.neighbors(p))[0],p,list(graph.neighbors(p))[1]] for p in previous_predicates_ids]
for spo in spo_list_tagged:
#print("graph.nodes(data=True)",graph.nodes(data=True))
#print("before spo weights",spo,graph.nodes[spo[0]]['weight'],graph.nodes[spo[1]]['weight'],graph.nodes[spo[2]]['weight'])
graph.nodes[spo[0]]['weight'] += 1
graph.nodes[spo[1]]['weight'] += 1
graph.nodes[spo[2]]['weight'] += 1
#print("after spo weights",spo,graph.nodes[spo[0]]['weight'],graph.nodes[spo[1]]['weight'],graph.nodes[spo[2]]['weight'])
for p in [x for x,y in graph.nodes(data=True) if y["type"]=="predicate"]:
p_n = list(graph.neighbors(p))
if p.find("-") != -1:
p_id = p[:p.find("-")]
p_value = p[p.find("-")+1:]
else:
p_id = p
p_value = 0
if len(p_n) > 1: spo_tuple = [p_n[0],p_id,p_n[1]]
else: spo_tuple = []
if spo_tuple not in spo_list and spo_tuple:
spo_list.append(spo_tuple)
else:
if p not in previous_predicates_ids:
graph.remove_node(p)
#print("2 len(graph)",len(graph))
for spo in spo_list:
if spo[1] in previous_predicates_ids_only:
if spo[0] not in previous_entities_ids and spo[2] not in previous_entities_ids:
bad_paths = [p for p in nx.all_shortest_paths(graph, source=spo[0], target=spo[2]) if p[1][:p[1].find("-")] == spo[1]]
for bp in bad_paths:
graph.remove_node(bp[1])
#print("3 len(graph)",len(graph))
local_main_themes=[]
for p in [x for x,y in graph.nodes(data=True) if y["type"]=="entity"]:
if len(list(graph.neighbors(p)))>2: local_main_themes.append(p)
meaningful_paths = []
#for t in [x for x,y in graph.nodes(data=True) if y["type"]=="entity"]:
for t in local_main_themes:
statements = get_all_statements_of_entity(t)
if not statements:
continue
for statement in statements:
tmp_statement_basic = []
s_entity = statement["entity"]['id']
if t!=s_entity:
continue
s_predicate = statement["predicate"]['id']
s_object = statement["object"]['id']
s_qualifiers = statement["qualifiers"]
tmp_statement_basic = [s_entity, s_predicate, s_object]
tmp_statement_qualifier = []
if s_qualifiers:
for s_qualifier in s_qualifiers:
s_qual_predicate = s_qualifier["qualifier_predicate"]['id']
s_qual_object = s_qualifier["qualifier_object"]['id']
tmp_statement_qualifier = tmp_statement_basic+[s_qual_predicate,s_qual_object]
if tmp_statement_qualifier[2] in local_main_themes:
meaningful_paths.append(tmp_statement_qualifier)
if tmp_statement_qualifier[4] in local_main_themes:
meaningful_paths.append(tmp_statement_qualifier)
if not tmp_statement_qualifier:
if tmp_statement_basic[2] in local_main_themes:
meaningful_paths.append(tmp_statement_basic)
#print("4 len(graph)",len(graph))
graph_all_entities = [x for x,y in graph.nodes(data=True) if y["type"]=="entity"]
graph_all_predicates = [x for x,y in graph.nodes(data=True) if y["type"]=="predicate"]
to_remove_predicates = []
meaningful_entities = []
for mps in meaningful_paths:
if mps[0] not in meaningful_entities: meaningful_entities.append(mps[0])
if mps[1] not in to_remove_predicates: to_remove_predicates.append(mps[1])
if mps[2] not in meaningful_entities: meaningful_entities.append(mps[2])
if len(mps)>3:
if mps[3] not in to_remove_predicates: to_remove_predicates.append(mps[3])
if mps[4] not in meaningful_entities: meaningful_entities.append(mps[4])
#print("meaningful_entities",meaningful_entities)
#print("to_remove_predicates",to_remove_predicates)
#print("BEFORE")
#plot_graph(graph, "file_name_graph", "Graph_title")
#
#print("len(graph)",len(graph))
#print("potential_predicates",potential_predicates)
if meaningful_entities:
[graph.remove_node(x) for x,y in graph.copy().nodes(data=True) if y["type"]=="entity" and x not in meaningful_entities]
#for ent in [x for x,y in graph.copy().nodes(data=True) if y["type"]=="entity" and x not in meaningful_entities]:
# ent_n = list(graph.neighbors(ent))
# ent_n = [e for e in ent_n if e[:e.find("-")] not in to_remove_predicates]
# ent_n_copy = ent_n.copy()
# for n in ent_n:
# if n[:n.find("-")] not in potential_predicates:
# ent_n_copy.remove(n);
# graph.remove_node(n)
# if len(ent_n_copy) == 0:
# graph.remove_node(ent)
#else:
# for ent in [x for x,y in graph.copy().nodes(data=True) if y["type"]=="entity"]:
# ent_n = list(graph.neighbors(ent))
# #print("ent, ent_n",ent, ent_n)
# ent_n_copy = ent_n.copy()
# for n in ent_n:
# #print("try n",n)
# if n[:n.find("-")] not in potential_predicates:
# ent_n_copy.remove(n);
# graph.remove_node(n)
# if len(ent_n_copy) == 0:
# graph.remove_node(ent)
#print("5 len(graph)",len(graph))
#[
# graph.remove_node(x) for x,y in graph.copy().nodes(data=True)
# if y["type"]=="entity"
# and x not in meaningful_entities
# and any(gn[:gn.find("-")] in meaningful_predicates for gn in list(graph.neighbors(x)))
#]
for p in [x for x,y in graph.copy().nodes(data=True) if y["type"]=="predicate"]:
p_n = list(graph.neighbors(p))
if len(p_n) < 2:
graph.remove_node(p)
continue
#print("6 len(graph)",len(graph))
for p in [x for x,y in graph.copy().nodes(data=True) if y["type"]=="entity"]:
p_n = list(graph.neighbors(p))
if len(p_n) < 1:
graph.remove_node(p)
continue
#print("7 len(graph)",len(graph))
#print("AFTER")
#plot_graph(graph, "file_name_graph", "Graph_title")
#print("len(graph)",len(graph))
#return "LOL"
#bad_paths = [p for p in nx.all_shortest_paths(graph, source=spo[0], target=spo[2]) if p[1][:p[1].find("-")] == spo[1]]
#for bp in bad_paths:
# graph.remove_node(bp[1])
# #break
#for p in [x for x,y in graph.nodes(data=True) if y["type"]=="entity"]:
# if len(p_n)>2: local_main_themes.append(p)
# print("p",get_wd_label(p),p,p_n)
#print("graph.nodes(data=True)",graph.nodes(data=True))
return graph, predicate_nodes
#test_graph = make_statements_graph(test_unduplicate_statements, indexing_predicates=False)
#print(test_graph[1])
#plot_graph(test_graph[0],"test")
#print("len(filtered_statements)",len(filtered_statements))
#start_time = time.time()
#graph, predicate_nodes = make_statements_graph(filtered_statements, indexing_predicates=True, cores=1)
#print(time.time()-start_time)
#print("--> ",len(graph), "nodes and", graph.size(), "edges")
#print(predicate_nodes)
#q0_test = "Who is the wife of Barack Obama?"
#q0_nlp_test = get_nlp(q0_test)
#q0_themes_test = get_themes(q0_nlp_test, q0_test, top_k=3)
#q0_themes_enhanced_test = get_enhanced_themes(q0_themes_test, top_k=3)
#q0_predicates_test = get_predicates_online(q0_nlp_test, top_k=3)
#q0_focused_parts_test = []
#q0_graph, q0_predicates_dict = build_graph(q0_nlp_test, q0_themes_test, q0_themes_enhanced_test, q0_predicates_test, deep_k=3)
#print(q0_predicates_dict)
#plot_graph(q0_graph, "file_name_graph", "Graph_title")
#
#q1_test = "Where did Barack Obama and Michelle Obama marry?"
#q1_nlp_test = get_nlp(q1_test)
#q1_themes_test = get_themes(q1_nlp_test, q1_test, top_k=2)
#print("q1_themes_test",q1_themes_test)
#q1_themes_enhanced_test = get_enhanced_themes(q1_themes_test, top_k=2)
#print("q1_themes_enhanced_test",q1_themes_enhanced_test)
#q1_predicates_test = get_predicates_online(q1_nlp_test, top_k=2)
#print("q1_predicates_test",q1_predicates_test)
#q1_focused_parts_test = []
#q1_graph, q1_predicates_dict = build_graph(q1_nlp_test, q1_themes_test, q1_themes_enhanced_test, q1_predicates_test, deep_k=3, context_graph=context_graph_1)
#print("q1_predicates_dict",q1_predicates_dict)
#plot_graph(q1_graph, "file_name_graph", "Graph_title")
#plot_graph(answer_1[-1], "file_name_graph", "Graph_title")
# In[21]:
def merge_lists(list_1, list_2):
if len(list_1) == len(list_2):
return [(list_1[i], list_2[i]) for i in range(0, len(list_1))]
else:
return "Error: lists are not the same lenght"
#print(merge_lists(["author"],['P50']))
# In[107]:
def get_themes_ids_from_chunks(noun_chunks, top_k=3, online=False):
if online:
theme_ids = [get_wd_ids_online(chunk.text, top_k=top_k)+get_wd_ids(chunk.text, top_k=top_k) for chunk in noun_chunks if chunk.text != ""]
else:
#print("noun_chunks",noun_chunks)
#print("[chunk for chunk in noun_chunks]",[chunk for chunk in noun_chunks])
#print("[chunk.text for chunk in noun_chunks]", [chunk.text for chunk in noun_chunks])
theme_ids = [get_wd_ids(chunk.text, top_k=top_k) for chunk in noun_chunks if chunk.text != ""]
return theme_ids
# In[108]:
def get_themes_ids_from_names(noun_chunks, top_k=3, online=False):
if online:
theme_ids = [get_wd_ids_online(chunk, top_k=top_k)+get_wd_ids(chunk, top_k=top_k) for chunk in noun_chunks if chunk != ""]
else:
theme_ids = [get_wd_ids(chunk, top_k=top_k) for chunk in noun_chunks if chunk != ""]
return theme_ids
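# Minimal example (assumes the HDT index and spaCy model are loaded):
#get_themes_ids_from_chunks(get_nlp("The Last Unicorn").noun_chunks, top_k=2)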
# In[109]:
def get_themes(nlp_question, raw_question, top_k=3, online=False, max_title_size=5):
#nlp_raw_question = get_nlp(raw_question, autocorrect=False)
#nlp_raw_question_lower = get_nlp(raw_question.lower(), autocorrect=False)
#nlp_raw_question_captialize = get_nlp(" ".join([w.capitalize() for w in raw_question.split(" ")]), autocorrect=False)
#nlp_raw_question_list = list(nlp_raw_question)
#print([e for e in nlp_raw_question])
#print([e for e in nlp_raw_question_lower])
#print([e for e in nlp_raw_question_captialize])
special_words = [w for w in raw_question.lower().split(" ") if w not in nlp_question.text.lower().split()]
special_words_capitalize = [w.capitalize() for w in special_words]
#special_words_capitalize_per_2 = it.permutations(special_words_capitalize,2)
#print("special_words",special_words)
#print("special_words_capitalize",special_words_capitalize)
special_words_titles = []
for per_size in range(2,len(special_words_capitalize)+1):
if per_size > max_title_size:
break
special_words_titles += [" ".join(p) for p in it.permutations(special_words_capitalize,per_size)]
#print("special_words_titles",special_words_titles)
#print(nlp_raw_question_list)
# PART1: finding themes as the user typed it
filter_list = ["PART", "PRON", "NUM"]
nlp_list_src = list(nlp_question)
nlp_list = []
for w in nlp_question:
if w.pos_ not in filter_list:
nlp_list.append(w)
nlp_question = get_nlp(" ".join([e.text for e in nlp_list]))
themes = [(ent, [ent.kb_id_]) for ent in get_kb_ents(nlp_question.text) if ent.kb_id_ != "NIL"]
#print("1 themes",themes)
for w in special_words+special_words_capitalize+special_words_titles:
themes += [(ent, [ent.kb_id_]) for ent in get_kb_ents(w) if ent.kb_id_ != "NIL"]
#print("2 themes",themes)
theme_complements = []
noun_chunks = [chunk for chunk in nlp_question.noun_chunks]
#print("1 noun_chunks",noun_chunks)
for w in special_words+special_words_capitalize+special_words_titles:
noun_chunks += [chunk for chunk in get_nlp(w, autocorrect=False).noun_chunks]
#print("2 noun_chunks",noun_chunks)
#theme_ids = [get_wd_ids(chunk.text, top_k=top_k) for chunk in noun_chunks][:top_k]
theme_ids = get_themes_ids_from_chunks(noun_chunks, top_k=top_k, online=online)
for i, chunk in enumerate(theme_ids):
if chunk: themes.append((noun_chunks[i], chunk))
else: theme_complements.append(noun_chunks[i])
# PART2: finding themes with the question capitalized
#print(nlp_question)
nlp_list_cap = []
nlp_list_low = []
nlp_list_lemma = []
nlp_list_no_det = []
nlp_list_cap_nouns = []
w_filter = ["WDT","WP","WP$","WRB"]
FILTER_NOUN = ["NOUN","PROPN"]
for w in nlp_question:
if w.tag_ not in w_filter:
nlp_list_cap.append(w.text.capitalize())
nlp_list_low.append(w.text.lower())
nlp_list_lemma.append(w.lemma_)
if w.pos_ != "DET":
nlp_list_no_det.append(w.text)
for w in nlp_question:
#print(w,w.pos_,w.tag_)
if w.pos_ in FILTER_NOUN:
nlp_list_cap_nouns.append(w.text.capitalize())
else:
nlp_list_cap_nouns.append(w.text.lower())
#print("nlp_list_cap_nouns",nlp_list_cap_nouns)
nlp_question_cap = get_nlp(" ".join([e for e in nlp_list_cap]))
nlp_question_low = get_nlp(" ".join([e for e in nlp_list_low]))
nlp_question_lemma = get_nlp(" ".join([e for e in nlp_list_lemma]))
nlp_question_no_det = get_nlp(" ".join([e for e in nlp_list_no_det]))
nlp_question_cap_nouns = get_nlp(" ".join([e for e in nlp_list_cap_nouns]))
themes += [(ent, [ent.kb_id_]) for ent in get_kb_ents(nlp_question_cap.text) if ent.kb_id_ != "NIL" and (ent, [ent.kb_id_]) not in themes]
themes += [(ent, [ent.kb_id_]) for ent in get_kb_ents(nlp_question_low.text) if ent.kb_id_ != "NIL" and (ent, [ent.kb_id_]) not in themes]
themes += [(ent, [ent.kb_id_]) for ent in get_kb_ents(nlp_question_lemma.text) if ent.kb_id_ != "NIL" and (ent, [ent.kb_id_]) not in themes]
themes += [(ent, [ent.kb_id_]) for ent in get_kb_ents(nlp_question_no_det.text) if ent.kb_id_ != "NIL" and (ent, [ent.kb_id_]) not in themes]
themes += [(ent, [ent.kb_id_]) for ent in get_kb_ents(nlp_question_cap_nouns.text) if ent.kb_id_ != "NIL" and (ent, [ent.kb_id_]) not in themes]
if online:
themes += [(ent, get_wd_ids_online(ent.text, is_predicate=False, top_k=top_k)) for ent in get_kb_ents(nlp_question_cap.text)]
themes += [(ent, get_wd_ids_online(ent.text, is_predicate=False, top_k=top_k)) for ent in get_kb_ents(nlp_question_low.text)]
themes += [(ent, get_wd_ids_online(ent.text, is_predicate=False, top_k=top_k)) for ent in get_kb_ents(nlp_question_lemma.text)]
themes += [(ent, get_wd_ids_online(ent.text, is_predicate=False, top_k=top_k)) for ent in get_kb_ents(nlp_question_no_det.text)]
themes += [(ent, get_wd_ids_online(ent.text, is_predicate=False, top_k=top_k)) for ent in get_kb_ents(nlp_question_cap_nouns.text)]
noun_chunks = []
previous_title_position = 0
for i_t,t in enumerate(nlp_question):
tmp_row = []
if i_t > previous_title_position:
if t.is_title:
for i_p in range(previous_title_position,i_t+1):
tmp_row.append(nlp_question[i_p])
noun_chunks.append(get_nlp(" ".join([w.text for w in tmp_row])))
if t.is_title:
previous_title_position = i_t
noun_chunks += [chunk for chunk in nlp_question_cap.noun_chunks]
#theme_ids = [get_wd_ids(chunk.text, top_k=top_k) for chunk in noun_chunks][:top_k]
theme_ids = get_themes_ids_from_chunks(noun_chunks, top_k=top_k, online=online)
for i, chunk in enumerate(theme_ids):
if chunk: themes.append((noun_chunks[i], chunk))
else: theme_complements.append(noun_chunks[i])
noun_chunks = [chunk for chunk in nlp_question_low.noun_chunks]
#theme_ids = [get_wd_ids(chunk.text, top_k=top_k) for chunk in noun_chunks][:top_k]
theme_ids = get_themes_ids_from_chunks(noun_chunks, top_k=top_k, online=online)
for i, chunk in enumerate(theme_ids):
if chunk: themes.append((noun_chunks[i], chunk))
else: theme_complements.append(noun_chunks[i])
noun_chunks = [chunk for chunk in nlp_question_lemma.noun_chunks]
#theme_ids = [get_wd_ids(chunk.text, top_k=top_k) for chunk in noun_chunks][:top_k]
theme_ids = get_themes_ids_from_chunks(noun_chunks, top_k=top_k, online=online)
for i, chunk in enumerate(theme_ids):
if chunk: themes.append((noun_chunks[i], chunk))
else: theme_complements.append(noun_chunks[i])
noun_chunks = [chunk for chunk in nlp_question_no_det.noun_chunks]
#theme_ids = [get_wd_ids(chunk.text, top_k=top_k) for chunk in noun_chunks][:top_k]
theme_ids = get_themes_ids_from_chunks(noun_chunks, top_k=top_k, online=online)
for i, chunk in enumerate(theme_ids):
if chunk: themes.append((noun_chunks[i], chunk))
else: theme_complements.append(noun_chunks[i])
noun_chunks = [chunk for chunk in nlp_question_cap_nouns.noun_chunks]
#print("nlp_question_cap_nouns",nlp_question_cap_nouns)
for i_w, w in enumerate(nlp_question_cap_nouns):
if i_w+2 < len(nlp_question_cap_nouns):
if (w.pos_ in FILTER_NOUN
and nlp_question_cap_nouns[i_w+1].pos_ == "CCONJ"
and nlp_question_cap_nouns[i_w+2].pos_ in FILTER_NOUN):
noun_chunks.append(get_nlp(w.text+" "+nlp_question_cap_nouns[i_w+1].text+" "+nlp_question_cap_nouns[i_w+2].text))
#print(w,w.pos_,w.tag_)
#print("noun_chunks",noun_chunks)
#theme_ids = [get_wd_ids(chunk.text, top_k=top_k) for chunk in noun_chunks][:top_k]
theme_ids = get_themes_ids_from_chunks(noun_chunks, top_k=top_k, online=online)
for i, chunk in enumerate(theme_ids):
if chunk: themes.append((noun_chunks[i], chunk))
else: theme_complements.append(noun_chunks[i])
themes_filtered = []
for t in themes:
if t[0].text in [tf[0].text for tf in themes_filtered]:
index = [tf[0].text for tf in themes_filtered].index(t[0].text)
tmp = t[1]+[i for j in [tf[1] for index, tf in enumerate(themes_filtered) if tf[0].text == t[0].text] for i in j]
themes_filtered[index] = (t[0],tmp)
else:
themes_filtered.append(t)
# removing the same elements per row and skipping already existing rows
unique_ids = []
themes_filtered_undupped = []
for tf in themes_filtered:
tmp_ids = []
for tfid in tf[1]:
if tfid not in unique_ids and tfid not in tmp_ids:
tfname = get_wd_label(tfid)
similarity = get_nlp(tfname).similarity(tf[0])
if similarity >= 0.95:
tmp_ids.append(tfid)
unique_ids.append(tfid)
if tmp_ids and tmp_ids not in [tfu[1] for tfu in themes_filtered_undupped]:
themes_filtered_undupped.append((tf[0][:],tmp_ids[:top_k]))
#for tf in themes_filtered:
# tmp_ids = []
# for tfid in tf[1]:
# if tfid not in tmp_ids:
# tmp_ids.append(tfid)
# if tmp_ids not in [tfu[1] for tfu in themes_filtered_undupped]:
# themes_filtered_undupped.append((tf[0],tmp_ids))
theme_complements_undupped = []
[theme_complements_undupped.append(tc) for tc in theme_complements if tc.text not in [tcu.text for tcu in theme_complements_undupped]]
#print(themes_filtered)
return themes_filtered_undupped, theme_complements_undupped
#q0_themes = get_themes(q0_nlp, top_k=3)
#q0_themes_test = get_themes(q0_nlp_test)
#q0_themes_test_2 = get_themes(q0_nlp_test_2)
#print(q0_themes)
#q_test_3 = get_nlp("the unicorn and the raccoons love obama barack's tacos")
#q_test_3_themes = get_themes(q_test_3, top_k=3)
#print(get_enhanced_themes(q_test_3_themes))
#print(q_test_3_themes)
#q_test_test = get_nlp("What is a tv action show?")
#q_test_test = get_nlp("Who voiced the Unicorn in The Last Unicorn")
#q_test_test = get_nlp("What is the name of the person who created Saved by the Bell?")
#q_test_test = get_nlp("When did the movie Grease come out?")
#q_test_question = "Who was an influential figure for miško Šuvaković"
#q_test_question = "who is the writer of chance and necessity"
#q_test_test = get_nlp(q_test_question,True)
#get_themes(q_test_test, q_test_question, top_k=2, online=True)
# In[110]:
BANNED_WORDS = ["...", "/"]
def get_theme_tuples(theme_list, top_k=3, online=False):
tuples = [(t, get_wd_ids(t, top_k=top_k)) for t in theme_list if t not in BANNED_WORDS]
if online:
tuples += [(t, get_wd_ids_online(t, is_predicate=False, top_k=top_k)) for t in theme_list if t not in BANNED_WORDS]
return tuples
def get_theme_no_stopwords(theme_list):
return [s for s in theme_list if not s.is_stop]
def get_theme_lemmatized(theme_list):
return [s.lemma_ for s in theme_list]
def get_permutation_tuples(theme_list, start=2):
permutations = []
for i in range(start, len(theme_list)+1):
permutations += list(it.permutations(theme_list,i))
return permutations
def get_lemma_permutation_tuples(theme_list, start=2):
return get_permutation_tuples(get_theme_lemmatized(theme_list), start=2)
def get_non_token_tuples(theme_list):
return [" ".join([e for e in list(l)]) for l in theme_list]
def get_non_token_lower_tuples(theme_list):
return [" ".join([e.lower() for e in list(l)]) for l in theme_list]
def get_non_token_capitalize_tuples(theme_list):
return [" ".join([c.capitalize() for c in [e for e in list(l)]]) for l in theme_list]
def get_text_tuples(theme_list):
return [" ".join([e.text for e in list(l)]) for l in theme_list]
def get_lower_tuples(theme_list):
return [" ".join([e.lower_ for e in list(l)]) for l in theme_list]
def get_capitalized_tuples(theme_list):
return [" ".join([c.capitalize() for c in [e.text for e in list(l)]]) for l in theme_list]
def get_capitalized_nouns_tuples(theme_list):
FILTER_NOUNS = ["NOUN","PROPN"]
return [" ".join([c.text.capitalize() for c in [e for e in list(l)] if c.pos_ in FILTER_NOUNS]) for l in theme_list]
def get_enhanced_themes(themes, top_k=3, title_limit=5, aggressive=False, online=False):
enhanced_themes = []
    # permute, capitalize, and lowercase the words in the complements
for c in themes[1]:
if len(c) <= title_limit:
per_lemma = get_theme_tuples(get_non_token_tuples([n for n in get_permutation_tuples(get_theme_lemmatized(c))]),top_k, online=online)
[enhanced_themes.append(p) for p in per_lemma if p[1] and p not in enhanced_themes]
del per_lemma
per_nostop = get_theme_tuples(get_text_tuples(get_permutation_tuples(get_theme_no_stopwords(c),start=1)),top_k, online=online)
[enhanced_themes.append(p) for p in per_nostop if p[1] and p not in enhanced_themes]
del per_nostop
per_lemma_nostop = get_theme_tuples(get_non_token_tuples([get_theme_lemmatized(s) for s in get_permutation_tuples(get_theme_no_stopwords(c),start=1)]),top_k, online=online)
[enhanced_themes.append(p) for p in per_lemma_nostop if p[1] and p not in enhanced_themes]
del per_lemma_nostop
per_lemma_lower = get_theme_tuples(get_non_token_lower_tuples([n for n in get_permutation_tuples(get_theme_lemmatized(c))]),top_k, online=online)
[enhanced_themes.append(p) for p in per_lemma_lower if p[1] and p not in enhanced_themes]
del per_lemma_lower
per_nostop_lower = get_theme_tuples(get_lower_tuples(get_permutation_tuples(get_theme_no_stopwords(c),start=1)),top_k)
[enhanced_themes.append(p) for p in per_nostop_lower if p[1] and p not in enhanced_themes]
del per_nostop_lower
per_lemma_nostop_lower = get_theme_tuples(get_non_token_lower_tuples([get_theme_lemmatized(s) for s in get_permutation_tuples(get_theme_no_stopwords(c),start=1)]),top_k, online=online)
[enhanced_themes.append(p) for p in per_lemma_nostop_lower if p[1] and p not in enhanced_themes]
del per_lemma_nostop_lower
per_lemma_capitalize = get_theme_tuples(get_non_token_capitalize_tuples([n for n in get_permutation_tuples(get_theme_lemmatized(c))]),top_k, online=online)
[enhanced_themes.append(p) for p in per_lemma_capitalize if p[1] and p not in enhanced_themes]
del per_lemma_capitalize
per_nostop_capitalize = get_theme_tuples(get_capitalized_tuples(get_permutation_tuples(get_theme_no_stopwords(c),start=1)),top_k, online=online)
[enhanced_themes.append(p) for p in per_nostop_capitalize if p[1] and p not in enhanced_themes]
del per_nostop_capitalize
per_lemma_nostop_capitalize = get_theme_tuples(get_non_token_capitalize_tuples([get_theme_lemmatized(s) for s in get_permutation_tuples(get_theme_no_stopwords(c),start=1)]),top_k, online=online)
[enhanced_themes.append(p) for p in per_lemma_nostop_capitalize if p[1] and p not in enhanced_themes]
per = get_theme_tuples(get_text_tuples(get_permutation_tuples(c)),top_k, online=online)
[enhanced_themes.append(p) for p in per if p[1] and p not in enhanced_themes]
del per
per_lower = get_theme_tuples(get_lower_tuples(get_permutation_tuples(c)),top_k, online=online)
[enhanced_themes.append(p) for p in per_lower if p[1] and p not in enhanced_themes]
del per_lower
per_capitalize = get_theme_tuples(get_capitalized_tuples(get_permutation_tuples(c)),top_k, online=online)
[enhanced_themes.append(p) for p in per_capitalize if p[1] and p not in enhanced_themes]
del per_capitalize
#per_capitalize_nouns = get_theme_tuples(get_capitalized_nouns_tuples(get_permutation_tuples(c)),top_k, online=online)
#[enhanced_themes.append(p) for p in per_capitalize_nouns if p[1] and p not in enhanced_themes]
#del per_capitalize_nouns
if aggressive:
predicates = []
[predicates.append(get_wd_label(pred)) for pred in sum([p[1] for p in themes[0]],[]) if get_wd_label(pred) not in predicates]
predicates_ids = [get_wd_ids_online(p, is_predicate=True, top_k=top_k) for p in predicates]
predicated_themes = merge_lists(predicates, predicates_ids)
predicated_themes = [pt for pt in predicated_themes if pt[1] != '']
if predicates: enhanced_themes += predicated_themes
#print("themes[0]",[t[0].text for t in themes[0]])
#print("themes[0].lower()",[t[0].text.lower() for t in themes[0]])
enhanced_themes_filtered = []
for et in enhanced_themes:
if not et[0] in [t[0].text for t in themes[0]]:
#print("et not in themes",et)
#print(len(themes[0]))
#print([t[0].text.find(et[0]) for t in themes[0]].count(-1))
if len([t for t in themes[0] if t[0].text.find(et[0]) == -1]) < len(themes[0]) or not et[1]:
continue
if et[0] in [e[0] for e in enhanced_themes_filtered]:
index_et = [e[0] for e in enhanced_themes_filtered].index(et[0])
if index_et != -1:
enhanced_themes_filtered[index_et] = (et[0], enhanced_themes_filtered[index_et][1]+et[1]) #.append((et[0],et[1][:top_k]))
else: enhanced_themes_filtered.append((et[0],et[1][:top_k]))
#elif et[0] not in :
# print("et unknown",et)
else:
enhanced_themes_filtered.append((et[0],et[1][:top_k]))
return enhanced_themes_filtered
#q_test_3 = get_nlp("Which genre of album is harder.....faster?",autocorrect=True)
#q_test_3 = get_nlp("the unicorn and the raccoons love obama barack's tacos")
#q_test_3 = get_nlp("what was the cause of death of yves klein")
#q_test_3 = get_nlp("Who is the author that wrote the book Moby Dick")
#q_test_question_3 = "who is the writer of chance and necessity"
#q_test_3 = get_nlp(q_test_question_3, autocorrect=True)
#q_test_3_themes = get_themes(q_test_3,q_test_question_3, top_k=2)
#print(q_test_3_themes[0])
#print(q_test_3_themes[1])
#print(get_enhanced_themes(q_test_3_themes, aggressive=False))
#print(get_enhanced_themes(q_test_3_themes, aggressive=True))
# In[111]:
def get_predicates_online(nlp_sentence, top_k=3, aggressive=False):
PASSIVE_VERBS = ["be"]
    AGGRESSIVE_FILTER = ["VERB","AUX","NOUN","ADJ"]
    if aggressive: predicates = [p for p in nlp_sentence if p.pos_ in AGGRESSIVE_FILTER]
    else: predicates = [p for p in nlp_sentence if p.pos_ == "VERB" or p.pos_ == "AUX"]
    if len(predicates) == 1:
        if predicates[0].lemma_ in PASSIVE_VERBS:
            predicates += [p for p in nlp_sentence if p.pos_ in AGGRESSIVE_FILTER if p not in predicates]
predicates_filtered = []
for p in predicates:
if p.lemma_ in PASSIVE_VERBS:
p = get_nlp(p.lemma_)[0]
if len(predicates_filtered) == 0:
predicates_filtered.append(p)
if p.text not in [p.text for p in predicates_filtered]:
predicates_filtered.append(p)
predicates_ids = []
for i_p, p in enumerate(predicates_filtered):
if p.lemma_ == "be":
predicates_ids.append(get_wd_ids_online("is", is_predicate=True, top_k=top_k)[:1])
elif p.pos_ != "PROPN":
p_id = get_wd_ids_online(p.text, is_predicate=True, top_k=top_k)
if not p_id:
p_id = get_wd_ids_online(p.lemma_, is_predicate=True, top_k=top_k)
if not p_id:
similar_words = [w[0] for w in get_most_similar(p.lemma_, top_k=top_k)]
for sw in similar_words:
if not p_id:
p_id = get_wd_ids_online(sw, is_predicate=True, top_k=top_k)
predicates_ids.append(p_id[:top_k])
return merge_lists(predicates_filtered, predicates_ids)
#q_test = get_nlp("Who voiced the Unicorn in The Last Unicorn")
#q_test = get_nlp("Of what nationality is Ken McGoogan")
#q_test = get_nlp("Which have the nation of Martha Mattox")
#q_test = get_nlp("what city was alex golfis born in")
#q_test = get_nlp("who's born in city was alex golfis born in")
#q_test = get_nlp("what's the name fo the wife of my dads")
#start_time = time.time()
#q_test = get_nlp("Where did roger marquis die")
#print(get_predicates_online(q_test, top_k=2, aggressive=False))
#print("it was:",time.time()-start_time)
#q0_predicates_test_2 = get_predicates_online(q0_nlp_test_2, top_k=3, aggressive=True)
# In[112]:
def get_predicates(nlp_sentence, themes=False, top_k=0):
PASSIVE_VERBS = ["be"]
predicates = [p for p in nlp_sentence if p.pos_ == "VERB" or p.pos_ == "AUX"]
#for i_p, p in enumerate(predicates):
# if p.text == "\'s":
# predicates[i_p] = get_nlp("is")[0]
# if p.text == "\'re":
# predicates[i_p] = get_nlp("are")[0]
if themes:
for t in themes[0]:
for e in t[1]:
if is_wd_predicate(e):
predicates.append(t[0])
predicates_filtered = []
for p in predicates:
if p.lemma_ in PASSIVE_VERBS:
p = get_nlp(p.lemma_)[0]
if len(predicates_filtered) == 0:
predicates_filtered.append(p)
if p.text not in [p.text for p in predicates_filtered]:
predicates_filtered.append(p)
predicates_ids = []
for i_p, p in enumerate(predicates_filtered):
if p.lemma_ in PASSIVE_VERBS:
predicates_ids.append(get_wd_ids(p.lemma_, is_predicate=True, top_k=top_k, limit=0)[:1])
else:
predicates_ids.append(get_wd_ids(p.text, is_predicate=True, top_k=top_k, limit=0)[:top_k])
#predicates_ids = [ for p in predicates_filtered]
return merge_lists(predicates_filtered, predicates_ids)
#q_test = get_nlp("Who voiced the Unicorn in The Last Unicorn")
#q_test = get_nlp("Of what nationality is Ken McGoogan")
#q_test = get_nlp("Where did roger marquis die")
#q_test = get_nlp("who's born in city was alex golfis born in")
#get_predicates(q_test)
#q_test_themes = get_themes(q_test)
#get_predicates(q_test, q_test_themes, top_k=3)
#q0_nlp_test_0 = get_nlp("Voiced")
#q0_predicates = get_predicates(q0_nlp, top_k=3)
#q0_predicates_test_2 = get_predicates(q0_nlp_test_2, top_k=3)
#print(q0_predicates)
# In[113]:
def extract_ids(to_extract):
return [i for i in it.chain.from_iterable([id[1] for id in to_extract])]
#extract_ids([('name', ['id'])]) #q0_themes[0] #q0_focused_parts #q0_predicates
#print(extract_ids([("The Last Unicorn", ['Q16614390']),("Second Theme", ['Q12345'])]))
#extract_ids(q0_focused_parts)
# In[114]:
def get_similarity_by_words(nlp_word_from, nlp_word_to):
if not nlp_word_from or not nlp_word_to:
return 0
elif not nlp_word_from.vector_norm or not nlp_word_to.vector_norm:
return 0
else:
return nlp_word_from.similarity(nlp_word_to)
#print(get_similarity_by_words(get_nlp("character role"), get_nlp("voice actor")))
# In[115]:
def get_similarity_by_ids(word_id_from, word_id_to):
nlp_word_from = get_nlp(get_wd_label(word_id_from))
nlp_word_to = get_nlp(get_wd_label(word_id_to))
return get_similarity_by_words(nlp_word_from, nlp_word_to)
#print(get_similarity_by_ids("P453", "P725"))
# In[116]:
def get_top_similar_statements(statements, from_token_id, similar_to_name, top_k=0, qualifier=False, statement_type="object", time_sentitive=False):
highest_matching_similarity = -1
top_statements = []
nlp_name = get_nlp(similar_to_name)
#print("get_top_similar_statements from_token_id",from_token_id)
if get_wd_label(from_token_id):
for statement in statements:
if top_k>0:
if qualifier:
for qualifier in statement['qualifiers']:
if time_sentitive and is_timestamp(qualifier[statement_type]['id']):
top_statements.append((1, statement))
else:
nlp_word_to = get_nlp(get_wd_label(qualifier[statement_type]['id']))
matching_similarity = get_similarity_by_words(nlp_name, nlp_word_to)
top_statements.append((matching_similarity, statement))
else:
if time_sentitive and is_timestamp(statement[statement_type]['id']):
top_statements.append((1, statement))
else:
nlp_word_to = get_nlp(get_wd_label(statement[statement_type]['id']))
matching_similarity = get_similarity_by_words(nlp_name, nlp_word_to)
top_statements.append((matching_similarity, statement))
else:
if qualifier:
if statement.get('qualifiers'):
for qualifier in statement['qualifiers']:
if time_sentitive and is_timestamp(qualifier[statement_type]['id']):
top_statements.append((1, statement))
else:
nlp_word_to = get_nlp(get_wd_label(qualifier[statement_type]['id']))
matching_similarity = get_similarity_by_words(nlp_name, nlp_word_to)
if highest_matching_similarity == -1 or matching_similarity >= highest_matching_similarity:
highest_matching_similarity = matching_similarity
best_statement = statement
top_statements.append((highest_matching_similarity, best_statement))
else:
if time_sentitive and is_timestamp(statement[statement_type]['id']):
top_statements.append((1, statement))
else:
nlp_word_to = get_nlp(get_wd_label(statement[statement_type]['id']))
matching_similarity = get_similarity_by_words(nlp_name, nlp_word_to)
if highest_matching_similarity == -1 or matching_similarity >= highest_matching_similarity:
highest_matching_similarity = matching_similarity
best_statement = statement
top_statements.append((highest_matching_similarity, best_statement))
if top_k > 0:
return sorted(top_statements, key=lambda x: x[0], reverse=True)[:top_k]
else:
return sorted(top_statements, key=lambda x: x[0], reverse=True)
#statements = get_all_statements_of_entity('Q503992')
#top_similar_statements = get_top_similar_statements(statements, 'Q267721', 'western')
#print(top_similar_statements)
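# For reference, a hedged sketch of the statement layout these helpers assume, inferred from
# how the fields are accessed above (the ids below are purely illustrative placeholders):
#example_statement = {
#    'entity': {'id': 'Q503992'},
#    'predicate': {'id': 'P725'},
#    'object': {'id': 'Q267721'},
#    'qualifiers': [{'qualifier_predicate': {'id': 'P453'}, 'qualifier_object': {'id': 'Q267721'}}],
#}
#get_top_similar_statements([example_statement], 'Q503992', 'western', top_k=1)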
# In[117]:
def get_best_similar_statements_by_word_worker(in_mp_queue, out_mp_queue, top_k, qualifier, statement_type, time_sentitive):
sentinel = None
best_statements = []
for token,similar_to_name in iter(in_mp_queue.get, sentinel):
# print("working on",token,similar_to_name)
statements = get_all_statements_of_entity(token)
if statements: best_statements += get_top_similar_statements(statements, token, similar_to_name, top_k=top_k, qualifier=qualifier, statement_type=statement_type, time_sentitive=time_sentitive)
# print("done with",token,similar_to_name)
out_mp_queue.put(best_statements)
# In[33]:
def get_best_similar_statements_by_word(from_token_ids, similar_to_name, top_k=3, qualifier=False, statement_type="object", time_sentitive=False, cores=1):
if not similar_to_name:
return []
best_statements = []
if cores > 1:
out_mp_queue = mp.Queue()
in_mp_queue = mp.Queue()
sentinel = None
for token in from_token_ids:
in_mp_queue.put((token,similar_to_name))
procs = [mp.Process(target = get_best_similar_statements_by_word_worker, args = (in_mp_queue, out_mp_queue, top_k, qualifier, statement_type, time_sentitive)) for i in range(cores)]
for proc in procs:
proc.daemon = True
proc.start()
for proc in procs:
in_mp_queue.put(sentinel)
for proc in procs:
best_statements += out_mp_queue.get()
for proc in procs:
proc.join()
else:
for token in from_token_ids:
statements = get_all_statements_of_entity(token)
if statements: best_statements += get_top_similar_statements(statements, token, similar_to_name, top_k=top_k, qualifier=qualifier, statement_type=statement_type, time_sentitive=time_sentitive)
#print("best_statements",best_statements)
return sorted(best_statements, key=lambda x: x[0], reverse=True)
#best_similar_statements = get_best_similar_statements_by_word(extract_ids(q0_themes[0]), 'voiced', top_k=3, qualifier=True, statement_type="qualifier_object")
#print(best_similar_statements[0])
#init_clusters = cluster_extend_by_words(theme_ids, [p[0].text for p in q_predicates+predicates_enhanced], top_k=deep_k, time_sentitive=time_sensitive,cores=2)
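# The multiprocessing layout above (and in the other *_worker helpers below) is a plain
# fan-out: the parent puts one work item per token on in_mp_queue, every worker drains the
# queue until it reads the sentinel (None) and then puts back a single list of results, so
# the parent does exactly one get per started process. A minimal sketch of the same pattern
# with a dummy worker (illustrative only, not part of the pipeline):
#def _dummy_worker(in_q, out_q):
#    results = []
#    for item in iter(in_q.get, None):
#        results.append(item * 2)
#    out_q.put(results)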
# In[34]:
def get_statements_subjects_labels(statements):
return [get_wd_label(t[1]['entity']['id']) for t in statements]
#print(get_statements_subjects_labels(best_similar_statements))
# In[35]:
def get_statements_predicates_labels(statements):
return [get_wd_label(t[1]['predicate']['id']) for t in statements]
#print(get_statements_predicates_labels(best_similar_statements))
# In[36]:
def get_statements_objects_labels(statements):
return [get_wd_label(t[1]['object']['id']) for t in statements]
#print(get_statements_objects_labels(best_similar_statements))
# In[37]:
def get_statements_qualifier_predicates_labels(statements):
return [get_wd_label(t[1]['qualifiers'][0]['qualifier_predicate']['id']) for t in statements]
#print(get_statements_qualifier_predicates_labels(best_similar_statements))
# In[38]:
def get_statements_qualifier_objects_labels(statements):
return [get_wd_label(t[1]['qualifiers'][0]['qualifier_object']['id']) for t in statements]
#print(get_statements_qualifier_objects_labels(best_similar_statements))
# In[39]:
def cluster_extend_by_words_worker(in_mp_queue, out_mp_queue, top_k, time_sentitive, cores):
sentinel = None
cluster = []
for cluster_root_ids,name in iter(in_mp_queue.get, sentinel):
cluster += get_best_similar_statements_by_word(cluster_root_ids, name, top_k=top_k, qualifier=True, statement_type="qualifier_predicate", time_sentitive=time_sentitive,cores=1)
cluster += get_best_similar_statements_by_word(cluster_root_ids, name, top_k=top_k, qualifier=True, statement_type="qualifier_object", time_sentitive=time_sentitive,cores=1)
cluster += get_best_similar_statements_by_word(cluster_root_ids, name, top_k=top_k, qualifier=False, statement_type="predicate", time_sentitive=time_sentitive,cores=1)
cluster += get_best_similar_statements_by_word(cluster_root_ids, name, top_k=top_k, qualifier=False, statement_type="object", time_sentitive=time_sentitive,cores=1)
out_mp_queue.put(cluster)
# In[40]:
def cluster_extend_by_words(cluster_root_ids, extending_words, top_k=3, time_sentitive=False,cores=mp.cpu_count()):
if not cluster_root_ids or not extending_words:
return []
cluster = []
if cores <= 0: cores = 1
out_mp_queue = mp.Queue()
in_mp_queue = mp.Queue()
sentinel = None
for name in extending_words:
in_mp_queue.put((cluster_root_ids,name))
procs = [mp.Process(target = cluster_extend_by_words_worker, args = (in_mp_queue, out_mp_queue, top_k, time_sentitive, cores)) for i in range(cores)]
for proc in procs:
proc.daemon = True
proc.start()
for proc in procs:
in_mp_queue.put(sentinel)
for proc in procs:
cluster += out_mp_queue.get()
for proc in procs:
proc.join()
return sorted(cluster, key=lambda x: x[0], reverse=True)
#start_time = time.time()
#for name in extending_words:
# #start_cluster_time = time.time()
# cluster += get_best_similar_statements_by_word(cluster_root_ids, name, top_k=top_k, qualifier=True, statement_type="qualifier_predicate", time_sentitive=time_sentitive,cores=cores)
# cluster += get_best_similar_statements_by_word(cluster_root_ids, name, top_k=top_k, qualifier=True, statement_type="qualifier_object", time_sentitive=time_sentitive,cores=cores)
# cluster += get_best_similar_statements_by_word(cluster_root_ids, name, top_k=top_k, qualifier=False, statement_type="predicate", time_sentitive=time_sentitive,cores=cores)
# cluster += get_best_similar_statements_by_word(cluster_root_ids, name, top_k=top_k, qualifier=False, statement_type="object", time_sentitive=time_sentitive,cores=cores)
# #end_time = time.time()
# #print("EXTENDING Cluster with:", name," ->\tRunning time is {}s".format(round(end_time-start_cluster_time,2)))
##end_time = time.time()
##print("EXTENDING Clusters ->\tRunning time is {}s".format(round(end_time-start_time,2)))
#test_cluster = cluster_extend_by_words(extract_ids(q0_themes[0]), ['voiced'], top_k=2)
#test_cluster_test_2 = cluster_extend_by_words(extract_ids(q0_themes_test_2[0]), ['birth'], top_k=2)
#print(test_cluster[0])
#start_time = time.time()
#init_clusters = cluster_extend_by_words(theme_ids, [p[0].text for p in q_predicates+predicates_enhanced], top_k=deep_k, time_sentitive=time_sensitive,cores=mp.cpu_count())
#print("timer",time.time()-start_time)
# In[41]:
# sorts the statements by their similarity score (the first element of each (score, statement) tuple)
def sort_statements_by_similarity(statements):
return [s for s in sorted(statements, key=lambda x: x[0], reverse=True)]
#test_sorted_statements = sort_statements_by_similarity(test_cluster)
#test_sorted_statements_test_2 = sort_statements_by_similarity(test_cluster_test_2)
#print(test_sorted_statements[0])
# In[42]:
# expands each qualifier into its own subject-predicate-object statement and drops the qualifier blocks
def statements_flatter(statements):
best_statements_to_graph = []
for statement in statements:
tmp_statement = copy(statement)
if tmp_statement.get('qualifiers'):
#print("statement", statement)
for q in tmp_statement['qualifiers']:
qualifier_statement = {'entity': {'id': tmp_statement['entity']['id']}}
qualifier_statement['predicate'] = {'id': q['qualifier_predicate']['id']}
qualifier_statement['object'] = {'id': q['qualifier_object']['id']}
best_statements_to_graph.append(qualifier_statement)
del(tmp_statement['qualifiers'])
else:
#print("tmp_statement", tmp_statement)
if ('qualifiers' in tmp_statement): del(tmp_statement['qualifiers'])
if tmp_statement not in best_statements_to_graph:
#print("best_statements_to_graph", tmp_statement)
best_statements_to_graph.append(tmp_statement)
return best_statements_to_graph
#test_flatten_statements = statements_flatter([s[1] for s in test_sorted_statements])
#test_flatten_statements_test_2 = statements_flatter([s[1] for s in test_sorted_statements_test_2])
#print(test_flatten_statements[0])
#test_flatten_statements_test_2
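# Hedged before/after sketch of statements_flatter (shape inferred from the code above; the
# ids are illustrative placeholders):
#   input : {'entity': {'id': 'Q1'}, 'predicate': {'id': 'P1'}, 'object': {'id': 'Q2'},
#            'qualifiers': [{'qualifier_predicate': {'id': 'P2'}, 'qualifier_object': {'id': 'Q3'}}]}
#   output: [{'entity': {'id': 'Q1'}, 'predicate': {'id': 'P2'}, 'object': {'id': 'Q3'}},
#            {'entity': {'id': 'Q1'}, 'predicate': {'id': 'P1'}, 'object': {'id': 'Q2'}}]
# i.e. each qualifier becomes a flat statement on the same entity, and the base triple is
# kept as well once its qualifier block has been stripped.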
# In[43]:
# remove duplicates from statements
def unduplicate_statements(statements):
filtered_statements = []
[filtered_statements.append(s) for s in statements if s not in [e for e in filtered_statements]]
return filtered_statements
#test_unduplicate_statements = unduplicate_statements(test_flatten_statements)
#print(len(test_flatten_statements))
#print(len(test_unduplicate_statements))
#print(test_unduplicate_statements[0])
# In[44]:
def get_statements_by_id(statements, from_token_id, to_id, qualifier=False, statement_type="predicate"):
id_statements = []
if not statements:
return id_statements
if get_wd_label(from_token_id):
for statement in statements:
if qualifier:
if statement.get('qualifiers'):
for s in statement['qualifiers']:
if to_id == s[statement_type]['id']:
id_statements.append(statement)
else:
if to_id == statement[statement_type]['id']:
id_statements.append(statement)
return id_statements
#statements_test = get_all_statements_of_entity('Q176198')
#id_statements_test = get_statements_by_id(statements_test, 'Q176198', 'P725')
#print(id_statements_test[0])
#get_statements_by_id(root_statements, cluster_root_id, predicate_id, qualifier=False, statement_type="predicate")
#statements_test = get_all_statements_of_entity('Q176198')
#id_statements_test = get_statements_by_id(statements_test, 'Q176198', 'P725')
#id_statements_test[0]
# In[45]:
def cluster_extend_by_words(cluster_root_ids, extending_words, top_k=3, time_sentitive=False,cores=2):
if not cluster_root_ids or not extending_words:
return []
cluster = []
if cores <= 0: cores = 1
out_mp_queue = mp.Queue()
in_mp_queue = mp.Queue()
sentinel = None
for name in extending_words:
in_mp_queue.put((cluster_root_ids,name))
procs = [mp.Process(target = cluster_extend_by_words_worker, args = (in_mp_queue, out_mp_queue, top_k, time_sentitive, cores)) for i in range(cores)]
for proc in procs:
proc.daemon = True
proc.start()
for proc in procs:
in_mp_queue.put(sentinel)
for proc in procs:
cluster += out_mp_queue.get()
for proc in procs:
proc.join()
return sorted(cluster, key=lambda x: x[0], reverse=True)
# In[46]:
def cluster_extend_by_predicates_ids_worker(in_mp_queue, out_mp_queue):
sentinel = None
cluster = []
for cluster_root_id, predicate_id in iter(in_mp_queue.get, sentinel):
root_statements = get_all_statements_of_entity(cluster_root_id)
if root_statements:
cluster += get_statements_by_id(root_statements, cluster_root_id, predicate_id, qualifier=True, statement_type="qualifier_predicate")
cluster += get_statements_by_id(root_statements, cluster_root_id, predicate_id, qualifier=False, statement_type="predicate")
out_mp_queue.put(cluster)
# In[47]:
# parameters
# cluster_root_ids: ['Qcode']
# predicates_ids: ['Pcode']
def cluster_extend_by_predicates_ids(cluster_root_ids, predicates_ids, cores=mp.cpu_count()):
if not cluster_root_ids or not predicates_ids:
return []
cluster = []
if cores <= 0: cores = 1
out_mp_queue = mp.Queue()
in_mp_queue = mp.Queue()
sentinel = None
for cluster_root_id, predicate_id in it.product(cluster_root_ids, predicates_ids):
#print((cluster_root_id, predicates_id))
in_mp_queue.put((cluster_root_id, predicate_id))
procs = [mp.Process(target = cluster_extend_by_predicates_ids_worker, args = (in_mp_queue, out_mp_queue)) for i in range(cores)]
for proc in procs:
proc.daemon = True
proc.start()
for proc in procs:
in_mp_queue.put(sentinel)
for proc in procs:
cluster += out_mp_queue.get()
for proc in procs:
proc.join()
#for cluster_root_id in cluster_root_ids:
# root_statements = get_all_statements_of_entity(cluster_root_id)
# #print("root_statements", root_statements)
# for predicate_id in predicates_ids:
# cluster += get_statements_by_id(root_statements, cluster_root_id, predicate_id, qualifier=True, statement_type="qualifier_predicate")
# cluster += get_statements_by_id(root_statements, cluster_root_id, predicate_id, qualifier=False, statement_type="predicate")
return cluster #sorted(cluster, key=lambda x: x[0], reverse=True)
#test_predicate_clusters = cluster_extend_by_predicates_ids(extract_ids(q0_themes[0]), extract_ids(q0_predicates))
#print(len(test_predicate_clusters))
#test_predicate_clusters[0]
#test_predicate_clusters_test_2 = cluster_extend_by_predicates_ids(extract_ids(q0_themes_test_2[0]), extract_ids(q0_predicates_test_2))
#print(len(test_predicate_clusters_test_2))
#print(test_predicate_clusters_test_2[-1])
#predicate_ids_clusters = cluster_extend_by_predicates_ids(theme_ids, predicates_ids+predicates_enhanced_ids)
#print(predicate_ids_clusters)
# In[48]:
def cluster_extractor_from_complements(complements):
for c in complements:
[print(t.pos_) for t in c]
return complements
#print(cluster_extractor_from_complements(q0_themes[1]))
# In[49]:
#TODO: add cache
#TODO: Check if extending with predicate_ids is useful
# parameter
# question: nlp_string
#limits=plt.axis('off')
def build_graph(nlp, themes, themes_enhanced, predicates, deep_k=3, time_sensitive = False, cores=mp.cpu_count(), context_graph=False, previous_answer=False, aggressive=False, k_deep_followup=2, max_deepness=3):
BANNED_IDS = ["P31"]
#print("time_sensitive",time_sensitive)
#start_time = time.time()
theme_ids = extract_ids(themes[0])
theme_enhanced_ids = extract_ids(themes_enhanced)
predicates_ids = extract_ids(predicates)
predicates_enhanced_ids = [p for p in theme_enhanced_ids if is_wd_predicate(p)]
predicates_enhanced = merge_lists([get_nlp(get_wd_label(p)) for p in predicates_enhanced_ids], predicates_enhanced_ids)
previous_predicates_ids = False
previous_entities_ids = False
if context_graph:
previous_predicates_ids = [x for x,y in context_graph.nodes(data=True) if y["type"]=="predicate"]
previous_predicates_ids_filtered = []
if previous_predicates_ids:
if previous_predicates_ids[0].find("-") != -1:
for ppi in previous_predicates_ids:
previous_predicates_ids_filtered.append(ppi[:ppi.find("-")])
previous_predicates_ids = previous_predicates_ids_filtered
previous_entities_ids = [x for x,y in context_graph.nodes(data=True) if y["type"]=="entity"]
#print("before previous_predicates_ids",previous_predicates_ids)
if previous_answer:
if len(previous_answer) > 1:
previous_answer_entity_ids = [p for p in previous_answer[1] if is_wd_entity(p)]
previous_answer_predicates_ids = [p for p in previous_answer[1] if is_wd_predicate(p)]
else:
previous_answer_entity_ids = []
previous_answer_predicates_ids = []
else:
previous_answer_entity_ids = []
previous_answer_predicates_ids = []
#print("after previous_predicates_ids",previous_predicates_ids)
for i, tei in enumerate(theme_enhanced_ids):
if tei in theme_ids:
tmp = theme_enhanced_ids.pop(i)
#print("before theme_ids",theme_ids)
#print("after predicates_ids",predicates_ids)
#print("before predicates_enhanced_ids",predicates_enhanced_ids)
if context_graph:# and not aggressive:
theme_ids += previous_answer_entity_ids[:k_deep_followup]
predicates_ids = previous_answer_predicates_ids[:k_deep_followup]+predicates_ids[:k_deep_followup]
#print("build_graph - not agressive and context graph present")
#elif context_graph:
# #print("build_graph - agressive and context graph present")
# theme_ids += previous_answer_entity_ids[:k_deep_followup]
# predicates_ids = previous_answer_predicates_ids[:k_deep_followup]
theme_ids_filtered = []
[theme_ids_filtered.append(ti) for ti in theme_ids if ti not in theme_ids_filtered]
theme_ids = theme_ids_filtered
predicates_ids_filtered = []
[predicates_ids_filtered.append(ti) for ti in predicates_ids if ti not in predicates_ids_filtered]
predicates_ids = predicates_ids_filtered
predicates_names = [p[0].text for p in predicates+predicates_enhanced]
#print("BEFORE predicates_names",predicates_names)
predicates_names += [get_wd_label(p) for p in predicates_ids+predicates_enhanced_ids if get_wd_label(p) and p not in BANNED_IDS and get_wd_label(p) not in predicates_names]
#print("AFTER predicates_names",predicates_names)
#print("after theme_ids",theme_ids)
#print("after predicates_ids",predicates_ids)
#print("after predicates_enhanced_ids",predicates_enhanced_ids)
#print("[p[0].text for p in predicates+predicates_enhanced]",[p[0].text for p in predicates+predicates_enhanced])
#print("predicates_ids+predicates_enhanced_ids",predicates_ids+predicates_enhanced_ids)
init_clusters = cluster_extend_by_words(theme_ids, predicates_names, top_k=deep_k, time_sentitive=time_sensitive, cores=cores)
#print("init_clusters",len(init_clusters))
init_clusters_enhanced = cluster_extend_by_words(theme_enhanced_ids, predicates_names, top_k=deep_k, time_sentitive=time_sensitive, cores=cores)
#print("init_clusters_enhanced",len(init_clusters_enhanced))
init_sorted_statements = sort_statements_by_similarity(init_clusters + init_clusters_enhanced)
#print("init_sorted_statements",len(init_sorted_statements))
init_flatten_statements = statements_flatter([s[1] for s in init_sorted_statements])
#print("init_flatten_statements",len(init_flatten_statements))
predicate_ids_clusters = cluster_extend_by_predicates_ids(theme_ids, predicates_ids+predicates_enhanced_ids, cores=cores)
#print("predicate_ids_clusters",len(predicate_ids_clusters))
predicate_ids_enhanced_clusters = cluster_extend_by_predicates_ids(theme_enhanced_ids, predicates_ids+predicates_enhanced_ids, cores=cores)
#print("predicate_ids_enhanced_clusters",len(predicate_ids_enhanced_clusters))
predicate_ids_flatten_statements = statements_flatter(predicate_ids_clusters+predicate_ids_enhanced_clusters)
#print("predicate_ids_flatten_statements",len(predicate_ids_flatten_statements))
clusters = init_flatten_statements+predicate_ids_flatten_statements
filtered_statements = unduplicate_statements(clusters)
#print(predicate_ids_enhanced_clusters)
graph = make_statements_graph(filtered_statements, potential_predicates=predicates_ids, cores=cores, context_graph=context_graph, max_deepness=max_deepness)
#print([get_wd_label(e) for e in g.nodes] )
##print("clusters:", len(clusters))
##print("filtered_statements:", len(filtered_statements))
#end_time = time.time()
#print("->\tRunning time is {}s".format(round(end_time-start_time,2)))
return graph
#q0_test = questions[0]
#q0_test = "Which actor voiced the Unicorn in The Last Unicorn?"
#q0_test = "what was the cause of death of yves klein"
#q0_test = "Who is the wife of Barack Obama?"
#q0_test = "Who is the author of Le Petit Prince?"
#q0_test = "Who is the wife of Barack Obama?"
#q0_test = "And Alan Arkin was behind.."
#q0_nlp_test = get_nlp(q0_test)
#q0_themes_test = get_themes(q0_nlp_test, q0_test, top_k=3)
#q0_themes_enhanced_test = get_enhanced_themes(q0_themes_test, top_k=3)
#q0_predicates_test = get_predicates_online(q0_nlp_test, top_k=3)
#q0_focused_parts_test = []
#q0_context_graph = conversation_history[0][1]
#q0_test = "And Alan Arkin was behind.."
#q0_nlp_test = get_nlp(q0_test)
#q0_themes_test = ([(get_nlp("Alan Arkin"), ['Q108283'])], [get_nlp("And")])
#q0_themes_enhanced_test = [('alan', ['Q29715852'])]
#q0_predicates_test = [(get_nlp("be"), ['P31'])]
#q0_focused_parts_test = []
#q0_context_graph = conversation_history[0][1]
#
#print("q0_themes_test",q0_themes_test)
#print("q0_themes_enhanced_test",q0_themes_enhanced_test)
#print("q0_predicates_test",q0_predicates_test)
#print("q0_focused_parts_test",q0_focused_parts_test)
#print("q0_context_graph",q0_context_graph)
#q0_graph, q0_predicates_dict = build_graph(q0_nlp_test, q0_themes_test, q0_themes_enhanced_test, q0_predicates_test, deep_k=3, context_graph=q0_context_graph)
#print(q0_predicates_dict)
#plot_graph(q0_graph, "file_name_graph", "Graph_title")
#-> predicates_dict: {'P453': 11, 'P725': 10, 'P144': 2, 'P1441': 15, 'P364': 4, 'P407': 2, 'P2868': 2, 'P4675': 1, 'P585': 1, 'P166': 3, 'P735': 1, 'P1476': 1, 'P1705': 1, 'P580': 3, 'P582': 3, 'P26': 2, 'P451': 1, 'P19': 1, 'P2031': 1, 'P31': 8, 'P1552': 1, 'P642': 2, 'P175': 1, 'P577': 1, 'P27': 1, 'P1545': 1, 'P495': 1, 'P4196': 1}
# In[50]:
def filter_graph_by_names(graph, filtering_names, entities=True, predicates=False):
# remove meaningless subgraphs
graph_copy = graph.copy()
for g in [g for g in (graph.subgraph(c) for c in nx.connected_components(graph))]:
is_meaningful = False
for e in g:
if get_wd_label(e) in filtering_names:
is_meaningful = True
break
if not is_meaningful:
for e in g:
graph_copy.remove_node(e)
return graph_copy
#q_theme_names = [q[0].text for q in q_themes[0]]
#q_theme_enhanced_names = [q[0] for q in q_themes_enhanced]
#filter_graph_by_names(graph, q_theme_names+q_theme_enhanced_names, entities=True, predicates=False)
#plot_graph(graph, "subgraph_test", "subgraph_test")
# In[51]:
#TODO
def filter_graph_by_ids(graph, filtering_ids, entities=True, predicates=False):
# remove meaningless subgraphs
graph_copy = graph.copy()
for g in [g for g in (graph.subgraph(c) for c in nx.connected_components(graph))]:
is_meaningful = False
for e in g:
if e in filtering_ids:
is_meaningful = True
break
if not is_meaningful:
for e in g:
graph_copy.remove_node(e)
return graph_copy
#print("filtered_graph",filtered_graph)
#plot_graph(filtered_graph, "subgraph_test", "subgraph_test")
#plot_graph(graph, "subgraph_test", "subgraph_test")
#q_theme_ids = extract_ids(q_themes[0])
#q_theme_enhanced_ids = extract_ids(q_themes_enhanced)
#filter_graph_by_ids(graph, q_theme_ids+q_theme_enhanced_ids, entities=True, predicates=False)
# In[52]:
# check the graph for complements
# parameters
# name: string
def find_name_in_graph(graph, name):
return [x for x,y in graph.nodes(data=True) if y['name'].lower() == name.lower()]
#[find_name_in_graph(c.text) for c in q0_themes[1]]
#print(find_name_in_graph(graph, "the unicorn"))
# In[53]:
# look up a node in the graph by its Wikidata id
# parameters
# id_to_find: string
def find_id_in_graph(graph, id_to_find):
return [x for x,y in graph.nodes(data=True) if x == id_to_find]
#[find_name_in_graph(c.text) for c in q0_themes[1]]
#print(find_name_in_graph(graph, "the unicorn"))
# In[54]:
# TODO: clean the complements by removing stopwords etc.
def find_theme_complement(graph, themes):
return [i for i in it.chain.from_iterable(
[id for id in [c for c in [find_name_in_graph(graph, t.text) for t in themes[1]] if c]])]
#print(find_theme_complement(graph, q0_themes_test))
#[i for i in it.chain.from_iterable([id for id in check_theme_complement(graph, q0_themes)])]
# In[55]:
def find_paths_in_graph(graph, node_start, node_end):
return [p for p in nx.all_simple_paths(graph, source=node_start, target=node_end)]
#test_paths = find_paths_in_graph(graph, "Q16205566", "Q7774795")
#print(test_paths)
# In[56]:
def is_id_in_graph(graph, node_id):
return graph.has_node(node_id)
#print(is_id_in_graph(graph, "Q24039104"))
# In[57]:
def is_name_in_graph(graph, node_name):
return find_name_in_graph(graph, node_name) != []
#print(is_name_in_graph(graph, "the Unicorn"))
# In[58]:
def find_paths_for_themes(graph, themes):
themes_ids = [t for t in extract_ids(themes[0])]
complements_ids = find_theme_complement(graph, themes)
paths = []
for t_id in themes_ids:
if is_id_in_graph(graph, t_id):
for c_id in complements_ids:
if is_id_in_graph(graph, c_id):
path = find_paths_in_graph(graph, t_id, c_id)
if path:
paths.append(path)
paths = [i for i in it.chain.from_iterable(
[id for id in paths])]
return paths
#print(find_paths_for_themes(graph, q0_themes_test))
#print(find_paths_for_themes(graph, q0_themes))
# In[59]:
def get_node_predicates_from_path(paths):
predicates = []
for p in paths:
[predicates.append(i[:i.find("-")]) for i in p if is_wd_predicate(i[:i.find("-")]) and i[:i.find("-")] not in predicates]
return predicates
#test_node_predicates = get_node_predicates_from_path(test_paths)
#print(test_node_predicates)
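# Hedged illustration: predicate nodes in these paths are assumed to carry an index suffix
# (e.g. 'P725-0'), so the slice i[:i.find("-")] recovers the bare property id before the
# is_wd_predicate check (node ids below are illustrative):
#print(get_node_predicates_from_path([['Q176198', 'P725-0', 'Q267721']]))  # -> ['P725']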
# In[60]:
def get_node_predicate_similarity_from_path(paths, predicates):
path_predicates = get_node_predicates_from_path(paths)
return sorted([(pp, get_similarity_by_ids(p2, pp)) for p in predicates for p2 in p[1] for pp in path_predicates], key=lambda x: x[-1], reverse=True)
#test_node_pedicate_similarities = get_node_predicate_similarity_from_path(test_paths, q0_predicates)
#print(test_node_pedicate_similarities)
# In[61]:
def get_focused_parts(nlp_sentence, themes, top_k=3, in_context=False):
W_FILTERS = ["WDT", "WP", "WP$", "WRB"]
V_FILTERS = ["VERB", "AUX"]
BANNED_WORDS = ["/"]
dummy_doc = get_nlp("dummy doc")
focused_parts = [t.head for t in nlp_sentence if t.tag_ in W_FILTERS]
for fp in focused_parts:
if fp.children:
for c in fp.children:
if c.tag_ not in W_FILTERS and c.text not in [fp.text for fp in focused_parts]:
focused_parts.append(c)
#print("focused_parts",focused_parts)
#print("themes[0]",themes[0])
for t in themes[0]:
for i_fp, fp in enumerate(focused_parts):
#print("fp",fp, type(fp))
for i_w, w in enumerate([w.lower_ for w in t[0]]):
#print("w",w, type(w))
if fp.lower_ == w:
#print("MATCHING")
if i_fp+1 < len(focused_parts):
#print("focused_parts[i_fp+1].lower_",focused_parts[i_fp+1].lower_)
#print("t[0][i_w-1].lower_",t[0][i_w-1].lower_)
if focused_parts[i_fp+1].lower_ == t[0][i_w-1].lower_:
#print(i_fp,fp, t[0][i_w-1], t[0])
#print("BEFORE focused_parts",focused_parts)
#print("t[0]",t[0])
if type(t[0]) == type(dummy_doc):
focused_parts[i_fp] = t[0][:]
else:
focused_parts[i_fp] = t[0]
del focused_parts[i_fp+1]
#print("AFTER focused_parts",focused_parts)
focused_parts = [fp for fp in focused_parts if fp.text not in BANNED_WORDS]
#print()
#for fp in focused_parts:
# print(type(fp))
#
# print(fp.as_doc())
#if isinstance() == 'spacy.tokens.span.Span':
# print("in")
#
#focused_parts = [type(fp) for fp in focused_parts]
#print("focused_parts",focused_parts)
if in_context:
focused_parts_ids = []
for p in focused_parts:
is_in = False
for t in themes[0]:
if p.text == t[0].text:
focused_parts_ids.append(t[1])
is_in = True
break
if not is_in:
focused_parts_ids.append(get_wd_ids(p.text, top_k=top_k))
#print("focused_parts",focused_parts)
#print("focused_parts_ids",focused_parts_ids)
else:
focused_parts_ids = [get_wd_ids(p.text, top_k=top_k) for p in focused_parts]
#print("focused_parts",focused_parts)
#print("focused_parts_ids",focused_parts_ids)
#focused_parts_ids = [get_wd_ids(p.text, top_k=top_k, online=True) for p in focused_parts]
#print("focused_parts_ids",focused_parts_ids)
merged_list = merge_lists(focused_parts, focused_parts_ids)
#print("merged_list",merged_list)
dummy_span = dummy_doc[:]
merged_list_filtered = []
for ml in merged_list:
if ml[1]:
if type(ml[0]) == type(dummy_span):
merged_list_filtered.append(ml)
elif ml[0].pos_ not in V_FILTERS and not ml[0].is_stop:
merged_list_filtered.append(ml)
return merged_list_filtered
#q_test_nlp = get_nlp("what's akbar tandjung's ethnicity")
#print(get_focused_parts(q0_nlp_test))
#q_test_nlp = get_nlp("Who voiced the Unicorn in The Last Unicorn?")
#print(get_focused_parts(q0_nlp_test))
#q_test_question = "Who is the author that wrote the book Moby Dick"
##q_test_question = "And Alan Arrkin voice actor character role behind"
#q_test_nlp = get_nlp(q_test_question)
#q_test_themes = get_themes(q_test_nlp,q_test_question, top_k=3)
#get_focused_parts(q_test_nlp,q_test_themes, top_k=3)
#q_test_nlp = get_nlp("Where was Shigeyasu Suzuki Place of Birth")
#q_test_nlp = get_nlp("Who is the author that wrote the book Moby Dick")
#q_test_nlp = get_nlp("Where was Shigeyasu Suzuki Place of Birth")
#q_test_themes = get_themes(q_test_nlp, top_k=3)
#get_focused_parts(q_test_nlp,q_test_themes, top_k=3)
#q_focused_parts: [(Unicorn, ['Q18356448', 'Q21070472', 'Q22043340', 'Q1565614', 'Q30060419']),
#(in, ['P642', 'Q29733109', 'P361', 'P131']),
#(the, ['Q1408543', 'Q2865743', 'Q29423', 'Q21121474']),
#(Unicorn, ['Q18356448', 'Q21070472', 'Q22043340', 'Q1565614', 'Q30060419']),
#(The, ['Q1067527', 'Q13423400', 'Q28457426', 'Q24406786', 'Q2430521', 'Q37199001']),
#(Last, ['Q16995904', 'Q20072822', 'Q24229340', 'Q20155285'])]
#[(author, ['P676', 'Q482980', 'Q3154968']),
# (book, ['Q571', 'Q4942925', 'Q997698']),
# (Dick, ['Q1471500', 'Q21510351', 'Q249606']),
# (Moby, ['Q1954726', 'Q6887412', 'Q14045'])]
# In[62]:
def is_in_list_by_similarity(word,list_of_words,similarity_threshold):
nlp_word = get_nlp(word)
for lw in list_of_words:
if nlp_word.similarity(get_nlp(lw)) > similarity_threshold:
return True
return False
#is_in_list_by_similarity("Moby Dick", ["moby-dick","star wars"],0.9)
# In[63]:
def add_compound(nlp_list, themes):
compounded = []
#if not nlp_list[0]:
# return compounded
try:
for t in [e[0] for e in themes[0]] + themes[1]:
for l in [n[0] for n in nlp_list]:
if l.text.lower() in t.text.lower():
compounded.append(t.text)
return compounded
except:
return compounded
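# Hedged illustration of add_compound: for a question like "Who voiced the Unicorn in The
# Last Unicorn", the ADP token "in" has children such as "Unicorn"; add_compound maps such
# child tokens back onto the full theme surface form they occur in, so the attention tail
# becomes "The Last Unicorn" rather than the bare child token (names below are hypothetical):
#print(add_compound([[t for t in q_nlp if t.text == "Unicorn"]], q_themes))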
# TODO: make the predicate search go further in the path list for the !i%2
def find_paths_keywords(graph, nlp, themes, themes_enhanced, predicates, focused_parts, keywords_len_limit=5, similarity_threshold=0.9):
WH_FILTER = ["WDT", "WP", "WP$", "WRB"]
VERB_FILTER = ["VERB", "AUX"]
NOUN_FILTER = ["NOUN","PROPN"]
POSITION_FILTER = ["ADP"]
#[(voiced, ['P725']), (Last, ['P5017'])]
predicates_ids = extract_ids(predicates)#[""]
#print("find_paths_keywords - predicates_ids",predicates_ids)
focused_parts_words = [t[0].text for t in focused_parts]
focused_parts_ids = [j for i in [t[1] for t in focused_parts] for j in i]
focused_parts_predicates_ids = [f for f in focused_parts_ids if is_wd_predicate(f)]
focused_parts_words_ids = [f for f in focused_parts_ids if is_wd_entity(f)]
focused_parts_words_ids_labeled = [get_wd_label(p) for p in focused_parts_words_ids]
#print(focused_parts_words_2)
question_anchors = [t for t in nlp if t.tag_ in WH_FILTER]
themes_enhanced_list = [t[0] for t in themes_enhanced]
focus_themes = [t[0].text for t in themes[0]]
focus_path_by_tails = [[c for c in t.head.children if c.pos_ in NOUN_FILTER] for t in nlp if t.pos_ == "PRON"]
focus_part_by_head = [t.head for t in question_anchors]
predicates_nlp = [t for t in nlp if t.pos_ in VERB_FILTER]
predicates_lemma = [t.lemma_ for t in predicates_nlp]
predicates_attention = [t for t in nlp if t.head in predicates_nlp]
predicates_attention_tails = [[c for c in t.children] for t in predicates_attention]
in_attention_heads = [t.head.text for t in nlp if t.pos_ in POSITION_FILTER]
in_attention_tails = add_compound([[c for c in t.children] for t in nlp if t.pos_ in POSITION_FILTER], themes)
focus_themes_enhanced = [t[0] for t in themes_enhanced
if t[0].lower() in [a.lower() for a in in_attention_tails]
or t[0].lower() in [a.lower() for a in in_attention_heads]]
theme_enhanced_ids = extract_ids(themes_enhanced)
predicates_enhanced_ids = predicates_ids+[(p) for p in theme_enhanced_ids if is_wd_predicate(p)]
[predicates_enhanced_ids.append(p) for p in focused_parts_predicates_ids if p not in predicates_enhanced_ids]
predicates_enhanced_ids_filtered = []
[predicates_enhanced_ids_filtered.append(p) for p in predicates_enhanced_ids if p not in predicates_enhanced_ids_filtered]
#print("predicates_enhanced_ids_filtered",predicates_enhanced_ids_filtered)
alterniative_words = {}
for t in themes_enhanced:
for e in predicates_enhanced_ids_filtered:
if e in t[1]:
alterniative_words[t[0]] = [get_nlp(get_wd_label(e)),[e]]
else:
alterniative_words[get_wd_label(e)] = [get_nlp(get_wd_label(e)),[e]]
#print("focused_parts_predicates_ids",focused_parts_predicates_ids)
#print("focused_parts_words_ids",focused_parts_words_ids)
#print("alterniative_words",alterniative_words)
#print("predicates_enhanced_ids",predicates_enhanced_ids)
##print("predicates_enhanced",predicates_enhanced)
#print("question_anchors",question_anchors)
#print("in_attention_heads",in_attention_heads)
#print("in_attention_tails",in_attention_tails)
#print("focus_themes",focus_themes)
#print("themes_enhanced_list",themes_enhanced_list)
#print("focus_themes_enhanced",focus_themes_enhanced)
#print("focus_path_by_tails",focus_path_by_tails)
#print("focus_part_by_head",focus_part_by_head)
#print("predicates_nlp",predicates_nlp)
#print("predicates_lemma",predicates_lemma)
#print("predicates_attention",predicates_attention)
#print("predicates_attention_tails",predicates_attention_tails)
#
#print("\n")
paths_keywords = []
[paths_keywords.append(e.lower()) for e in focused_parts_words + in_attention_heads + in_attention_tails + focus_themes + focus_themes_enhanced + focused_parts_words_ids_labeled if e.lower() not in paths_keywords]
#print(paths_keywords)
#paths_keywords = [p for p in it.permutations(paths_keywords)]
#print(paths_keywords)
paths_keywords = [p for p in paths_keywords if p and len(p.split(" ")) <= keywords_len_limit]
paths_keywords_filtered = []
#print("paths_keywords",paths_keywords)
#for k in paths_keywords:
# print("current k",k)
# #print("paths_keywords_filtered before",paths_keywords_filtered)
# is_in_list_by_similarity(k, paths_keywords_filtered,similarity_threshold)
[paths_keywords_filtered.append(k) for k in paths_keywords if not is_in_list_by_similarity(k, paths_keywords_filtered,similarity_threshold)]
#is_in_list_by_similarity("Moby Dick", ["moby-dick","star wars"],0.9)
return paths_keywords_filtered, alterniative_words, question_anchors
#initial_paths = find_paths_for_themes(graph, themes)
#predicate_id_similarities = get_node_predicate_similarity_from_path(initial_paths, predicates)
#best_path = [p for p in initial_paths if predicate_id_similarities[0][0] == p[1][:p[1].find("-")]]
#path_answer = get_wd_label(best_path[0][2]) if best_path else []
#return (path_answer, best_path[0][2]) if path_answer else (False, False)
#paths_keywords_2 = find_paths_keywords(graph_2, q_nlp_2, q_themes_2, q_themes_enhanced_2, q_predicates_2, q_focused_parts_2)
#paths_keywords_2
# In[64]:
def get_all_simple_paths_worker(graph, in_mp_queue, out_mp_queue):
found_paths = []
sentinel = None
for source, target in iter(in_mp_queue.get, sentinel):
for path in nx.all_simple_paths(graph, source = source, target = target, cutoff = None):
found_paths.append(path)
out_mp_queue.put(found_paths)
# In[65]:
def get_keywords_nodes_worker(graph, threshold, in_mp_queue, out_mp_queue):
keywords_nodes = []
sentinel = None
for name in iter(in_mp_queue.get, sentinel):
#print("2 get_paths_keywords_nodes")
nlp_lookup = get_nlp(name)
        # keep one node list per keyword handled by this worker
        keywords_nodes.append([x for x,y in graph.nodes(data=True) if get_nlp(y['name']).similarity(nlp_lookup) >= threshold])
out_mp_queue.put(keywords_nodes)
# In[66]:
def do_nothing(perm):
return perm
def get_paths_keywords_nodes(graph, keywords,threshold=0.9,top_performance=50, cores=mp.cpu_count()):
#print("1 get_paths_keywords_nodes")
if cores <= 0: cores = 1
sentinel = None
out_mp_queue = mp.Queue()
in_mp_queue = mp.Queue()
for k in keywords:
in_mp_queue.put(k)
procs = [mp.Process(target = get_keywords_nodes_worker, args = (graph, threshold, in_mp_queue, out_mp_queue)) for i in range(cores)]
keywords_nodes = []
for proc in procs:
proc.daemon = True
proc.start()
for proc in procs:
in_mp_queue.put(sentinel)
for proc in procs:
        keywords_nodes.extend(out_mp_queue.get())
for proc in procs:
proc.join()
keywords_nodes = [k for k in keywords_nodes if k]
#print("3 get_paths_keywords_nodes")
#print("1 get_paths_keywords_nodes")
#keywords_nodes = []
#print("len(keywords)",len(keywords))
#for k in keywords:
# nlp_lookup = get_nlp(k)
# keywords_nodes.append([x for x,y in graph.nodes(data=True) if get_nlp(y['name']).similarity(nlp_lookup) >= threshold])
# print("2 get_paths_keywords_nodes")
#keywords_nodes = [k for k in keywords_nodes if k]
#print("3 get_paths_keywords_nodes")
#keywords_nodes [['Q17521117', 'Q17521118', 'Q557214', 'Q421946', 'Q11282976', 'Q4677712', 'Q33999'], ['Q7246', 'Q1307944', 'Q21070472', 'Q18356448', 'Q1863113', 'Q20983877', 'Q226755', 'Q22043340'], ['Q176198', 'Q967268', 'Q17553756', 'Q30060419', 'Q17985004', 'Q16614390', 'Q18647334', 'Q15628943'], ['Q176198', 'Q967268', 'Q17553756', 'Q30060419', 'Q17985004', 'Q16614390', 'Q18647334', 'Q15628943'], []]
#keywords_nodes[0] ['Q17521117', 'Q17521118', 'Q557214', 'Q421946', 'Q11282976', 'Q4677712', 'Q33999']
#keywords_nodes[1] ['Q7246', 'Q1307944', 'Q21070472', 'Q18356448', 'Q1863113', 'Q20983877', 'Q226755', 'Q22043340']
keywords_nodes_per = []
if keywords_nodes:
#print("4 get_paths_keywords_nodes")
if len(keywords_nodes) > 1:
#print("5 get_paths_keywords_nodes")
for kn_i, kn in enumerate(keywords_nodes):
#print("6 get_paths_keywords_nodes")
if kn_i + 1 < len(keywords_nodes):
if len(kn) * len(keywords_nodes[kn_i+1]) > top_performance:
if len(kn) <= int(sqrt(top_performance)):
keywords_nodes[kn_i+1] = keywords_nodes[kn_i+1][:int(top_performance/len(kn))]
elif len(kn) >= len(keywords_nodes[kn_i+1]):
kn = kn[:int(top_performance/len(keywords_nodes[kn_i+1]))]
else:
kn = kn[:int(sqrt(top_performance))]
keywords_nodes[kn_i+1] = keywords_nodes[kn_i+1][:int(sqrt(top_performance))]
#print("7 get_paths_keywords_nodes")
#print("8 get_paths_keywords_nodes")
with mp.Pool() as pool:
keywords_nodes_per = pool.map(do_nothing, it.permutations(keywords_nodes, 2))
#print("9 get_paths_keywords_nodes")
keywords_nodes_per = [p for p in keywords_nodes_per]
#print(">1 len(keywords_nodes_per",len(keywords_nodes_per),keywords_nodes_per[0])
else:
keywords_nodes_per = [(keywords_nodes+keywords_nodes)]
#print("<1 len(keywords_nodes_per)",len(keywords_nodes_per),keywords_nodes_per[0])
#print("keywords_nodes_per",keywords_nodes_per)
#return 0
paths_keyword_nodes = []
targets = []
sources = []
for pkn in keywords_nodes_per:
[sources.append(pkn0) for pkn0 in pkn[0] if pkn0 not in sources]
[targets.append(pkn1) for pkn1 in pkn[1] if pkn1 not in targets]# and pkn1 not in pkn[0]]
#print("len(targets)",len(targets))
#print("len(sources)",len(sources))
out_mp_queue = mp.Queue()
in_mp_queue = mp.Queue()
sentinel = None
for source, target in it.product(sources, targets):
in_mp_queue.put((source, target))
procs = [mp.Process(target = get_all_simple_paths_worker, args = (graph, in_mp_queue, out_mp_queue)) for i in range(cores)]
for proc in procs:
proc.daemon = True
proc.start()
for proc in procs:
in_mp_queue.put(sentinel)
for proc in procs:
paths_keyword_nodes.extend(out_mp_queue.get())
for proc in procs:
proc.join()
paths_keyword_nodes = [p for p in paths_keyword_nodes if p]
#paths_keyword_nodes_filtered = []
#[paths_keyword_nodes_filtered.append(p) for p in paths_keyword_nodes if p not in paths_keyword_nodes_filtered]
#print("len(paths_keyword_nodes)",len(paths_keyword_nodes))
return paths_keyword_nodes
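# Rough intuition for the top_performance cap above: for every consecutive pair of keyword
# node lists, the product of their lengths is meant to stay at or below top_performance by
# truncating one or both lists (e.g. with top_performance=50, candidate lists of 20 and 30
# nodes are both cut to about int(sqrt(50)) = 7), which bounds the number of source/target
# pairs handed to the all-simple-paths search.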
def find_path_nodes_from_graph(nlp_question, graph, predicates_dict, keywords, threshold=0.9,special_pred_theshold=0.7, thres_inter=0.15, top_performance=50,min_paths=3000, cores=mp.cpu_count()):
#print("current threshold", str(round(threshold, 1)))
w_positions, w_names = w_converter(nlp_question)
w_names_only = [wn[1] for wn in w_names]
date_trigger = "date" in w_names_only
location_trigger = "location" in w_names_only
#location_trigger = "person" in w_names_only
all_predicates = list(predicates_dict.keys())
option_keywords = []
if date_trigger:
nlp_time = get_nlp("time")
nlp_date = get_nlp("date")
for p in all_predicates:
#print("current p", p)
p_label = get_wd_label(p)
nlp_p = get_nlp(p_label)
#print("nlp_p",nlp_p)
p_date = nlp_p.similarity(nlp_date)
p_time = nlp_p.similarity(nlp_time)
#print("p_date",p_date)
#print("p_time",p_time)
if p_date > special_pred_theshold or p_time > special_pred_theshold:
if p not in option_keywords:
#print("adding",p)
option_keywords.append(p_label)
if location_trigger:
nlp_location = get_nlp("location")
nlp_place = get_nlp("place")
for p in all_predicates:
#print("current p", p)
p_label = get_wd_label(p)
nlp_p = get_nlp(p_label)
#print("nlp_p",nlp_p)
p_location = nlp_p.similarity(nlp_location)
p_place = nlp_p.similarity(nlp_place)
#print(p_label, "p_location",p_location)
#print(p_label, "p_place",p_place)
if p_location > special_pred_theshold or p_place > special_pred_theshold:
if p not in option_keywords:
#print("adding",p)
option_keywords.append(p_label)
for k in keywords[0]:
nlp_k = get_nlp(get_wd_label(k))
for p in all_predicates:
#print("current p", p)
p_label = get_wd_label(p)
nlp_p = get_nlp(p_label)
#print("nlp_p",nlp_p)
p_k_sim = nlp_p.similarity(nlp_k)
if p_k_sim > special_pred_theshold:
if p not in option_keywords:
option_keywords.append(p_label)
#print("keywords[1]",keywords[1])
k1_predicates = keywords[1].values() #[[country of citizenship, ['P27']], [country of citizenship, ['P27']]]
#print("k1_predicates",k1_predicates)
k1_predicates = sum([[get_wd_label(p) for p in e[1]] for e in k1_predicates],[])
#print("k1_predicates",k1_predicates)
#print("option_keywords",option_keywords)
all_keywords = []
[all_keywords.append(k) for k in keywords[0] + option_keywords + k1_predicates if k and k not in all_keywords]
#print("all_keywords",all_keywords)
main_keyword_paths = get_paths_keywords_nodes(graph, all_keywords,threshold=threshold,top_performance=top_performance, cores=cores)
alternative_keyword_paths = []
#for k_1 in keywords[1]:
# for i, k_0 in enumerate(all_keywords):
# if k_1==k_0:
# tmp_keywords = all_keywords.copy()
# tmp_keywords[i] = keywords[1][k_1][0].text
# alternative_keyword_paths += get_paths_keywords_nodes(graph, all_keywords,threshold=threshold,top_performance=top_performance, cores=cores)
keyword_paths = main_keyword_paths#+alternative_keyword_paths
#print("BEFORE len(keyword_paths)",len(keyword_paths))
keyword_paths_filtered=[]
[keyword_paths_filtered.append(p) for p in keyword_paths if p not in keyword_paths_filtered]
keyword_paths = keyword_paths_filtered
#print("keyword_paths",len(keyword_paths))
#print("len(keyword_paths)",len(keyword_paths))
if len(keyword_paths) < min_paths:
if threshold == 0: return keyword_paths
threshold -= thres_inter
if threshold < 0: threshold = 0
keyword_paths = get_paths_keywords_nodes(graph, all_keywords,threshold=threshold,top_performance=top_performance, cores=cores)
keyword_paths_filtered=[]
[keyword_paths_filtered.append(p) for p in keyword_paths if p not in keyword_paths_filtered]
keyword_paths = keyword_paths_filtered
#keyword_paths_filtered = []
#print("AFTER len(keyword_paths)",len(keyword_paths))
#[keyword_paths_filtered.append(p) for p in keyword_paths if p not in keyword_paths_filtered]
return keyword_paths
#path_nodes_2 = find_path_nodes_from_graph(nlp_question,graph_2, predicates_dict,paths_keywords_2, threshold=0.9, special_pred_theshold=0.7, thres_inter=0.15, top_performance=50, min_paths=3000)
#end_time = time.time()
#start_time = time.time()
#path_nodes = find_path_nodes_from_graph(nlp_question,graph, predicates_dict,paths_keywords, threshold=0.8, thres_inter=0.1, top_performance=graph.size(),min_paths=3000,cores=2)
#print("--> len(path_nodes):",len(path_nodes))
#print("Finding path nodes ->\tRunning time is {}s".format(round(time.time()-start_time,2)))
# In[67]:
def is_sublist(a, b):
if not a: return True
if not b: return False
#if a == b: return False
return b[:len(a)] == a or is_sublist(a, b[1:])
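# is_sublist checks for a contiguous sub-sequence, e.g. (pure Python, illustrative ids):
#print(is_sublist(['Q1', 'P2'], ['Q0', 'Q1', 'P2', 'Q3']))  # True
#print(is_sublist(['Q1', 'Q3'], ['Q0', 'Q1', 'P2', 'Q3']))  # False: not contiguous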
def paths_nodes_filter_is_sublist_worker(in_mp_queue, out_mp_queue, filtered_paths):
    sentinel = None
    for i, fp in iter(in_mp_queue.get, sentinel):
        is_strict_sublist = False
        for fp_2 in filtered_paths:
            if is_sublist(fp, fp_2) and fp != fp_2:
                is_strict_sublist = True
                break
        # exactly one result per consumed path: its index if it is a strict sublist of
        # another path (and should be dropped), otherwise None
        out_mp_queue.put(i if is_strict_sublist else None)
# In[68]:
#node_predicates_names_2 = get_node_predicates_from_path(path_nodes_2)
def paths_nodes_filter(path_nodes, graph, cores=mp.cpu_count(), with_sublists=True):
filtered_paths = []
for path in path_nodes:
filtered_row = []
for i,p in enumerate(path):
if is_wd_predicate(p[:p.find("-")]):
if i == 0:
#if p[:p.find("-")] == "P725":
# print(p)
neighbor = [k for k in graph[p].keys() if k != path[i+1]]
if neighbor:
filtered_row.append(neighbor[0])
filtered_row.append(p[:p.find("-")])
else:
continue
#print(filtered_row)
elif i > 0 and i < len(path)-1:
filtered_row.append(p[:p.find("-")])
else:
neighbor = [k for k in graph[p].keys() if k != path[i-1]]
if neighbor:
filtered_row.append(p[:p.find("-")])
filtered_row.append(neighbor[0])
else:
continue
else: filtered_row.append(p)
#print("filtered_paths",filtered_paths)
if len(filtered_row) > 1 and filtered_row not in filtered_paths:
filtered_paths.append(filtered_row)
if with_sublists:
if cores <= 0: cores = 1
out_mp_queue = mp.Queue()
in_mp_queue = mp.Queue()
sentinel = None
for i,fp in enumerate(filtered_paths):
in_mp_queue.put((i, fp))
procs = [mp.Process(target = paths_nodes_filter_is_sublist_worker, args = (in_mp_queue, out_mp_queue, filtered_paths)) for i in range(cores)]
to_remove_idexes = []
for proc in procs:
proc.daemon = True
proc.start()
for proc in procs:
in_mp_queue.put(sentinel)
        # each worker pushes one index per redundant sub-path, then a final False sentinel
        finished_workers = 0
        while finished_workers < len(procs):
            result = out_mp_queue.get()
            if result is False: finished_workers += 1
            else: to_remove_idexes.append(result)
for proc in procs:
proc.join(1)
#print("to_remove_idexes",to_remove_idexes)
        for tri in to_remove_idexes:
            filtered_paths[tri] = []
unique_paths = [p for p in filtered_paths if p]
unique_paths_with_reversed = []
for up in unique_paths:
reversed_up = list(reversed(up))
if up not in unique_paths_with_reversed:
unique_paths_with_reversed.append(up)
if reversed_up not in unique_paths_with_reversed:
unique_paths_with_reversed.append(reversed_up)
#print("unique_paths",len(unique_paths))
#for i, up in enumerate(unique_paths):
# for up_2 in unique_paths:
# if (list(reversed(up)) == up_2):
# unique_paths[i] = []
# break
#cleaned_paths = []
#unique_paths = [up for up in unique_paths if up]
#for up in unique_paths:
# for i,e in enumerate(up):
# if not is_wd_predicate(e):
# for j,r in enumerate(list(reversed(up))):
# if not is_wd_predicate(r):
# cleaned_paths.append(up[i:-j])
# break
#print("cleaned_paths",len(cleaned_paths))
#cleaned_paths = [c for c in cleaned_paths if len(c) > 2]
#unique_paths = cleaned_paths.copy()
#for i,fp in enumerate(cleaned_paths):
# for fp_2 in cleaned_paths:
# if (is_sublist(fp, fp_2) and fp!=fp_2):
# unique_paths[i] = []
# break
#unique_paths = [p for p in unique_paths if len(p) > 2]
#for i, up in enumerate(unique_paths):
# for up_2 in unique_paths:
# if (list(reversed(up)) == up_2):
# unique_paths[i] = []
# break
#print(up)
#[up for up in unique_paths if up and not is_wd_predicate(up[-1]) and not is_wd_predicate(up[0])]
#print()
#for up in unique_paths:
# print(up)
# break
# return []
else:
unique_paths_with_reversed = filtered_paths
return [p for p in unique_paths_with_reversed if len(p) > 2] #False#[up for up in unique_paths if up and not is_wd_predicate(up[-1]) and not is_wd_predicate(up[0])]#False# [p for p in unique_paths if p]
#paths_nodes_filtered_2 = paths_nodes_filter(path_nodes_2, graph_2)
#print("unique_paths", len(paths_nodes_filtered_2))
#for p in paths_nodes_filtered_2:
# print(p)
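# In short, paths_nodes_filter() strips the "-<n>" tags from predicate nodes,
# re-attaches the missing neighbour when a path starts or ends on a predicate,
# removes paths that are sub-paths of longer ones (in parallel), adds the
# reversed variant of each surviving path, and keeps only paths longer than 2.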
# In[69]:
def w_converter(nlp, focused_parts=False):
w_positions = []
w_names = []
who_like = ["who", "whom"]
for i_q,q in enumerate(nlp):
if q.lemma_ == "where":
w_positions.append((i_q))
w_names.append((i_q,"location"))
elif q.lemma_ == "when":
w_positions.append((i_q))
w_names.append((i_q,"date"))
elif q.lemma_ in who_like:
w_positions.append((i_q))
w_names.append((i_q,"person"))
elif q.lemma_ == "why":
w_positions.append((i_q))
w_names.append((i_q,"cause"))
elif q.lemma_ == "which":
w_positions.append((i_q))
w_names.append((i_q,"which"))
elif q.lemma_ == "what":
w_positions.append((i_q))
w_names.append((i_q,"what"))
elif i_q+1 < len(nlp) and q.lemma_ == "how" and (nlp[i_q+1].lemma_ == "much" or nlp[i_q+1].lemma_ == "many"):
w_positions.append((i_q))
w_names.append((i_q,"quantity"))
if not w_positions and focused_parts:
for fp in focused_parts:
w_positions.append((nlp.text.find(fp[0].text)))
w_names.append((w_positions[-1],fp[0].lower_))
elif not w_positions:
w_positions.append((0))
w_names.append((0,""))
return w_positions, w_names
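# Illustrative behaviour (assuming the usual spaCy lemmas):
#   w_converter(get_nlp("When was Carl Sagan born?"))      -> ([0], [(0, "date")])
#   w_converter(get_nlp("How many moons does Mars have?")) -> ([0], [(0, "quantity")])
# Without a wh-word, position 0 with an empty name is returned (or the focused
# parts, when provided).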
# In[70]:
def get_entity_similarity(word_id, entity_type, banned_labels=[], max_reward=2.0):
LOCATION_FILTER = ["GPE", "FAC", "LOC","PERSON"]
PERSON_FILTER = ["PERSON","NORP","ORG","PER"]
DATE_FILTER = ["DATE","TIME"]
CAUSE_FILTER = ["NORP","PRODUCT","EVENT","MISC"]
WHICH_FILTER = PERSON_FILTER+DATE_FILTER+["GPE","LOC","PRODUCT","EVENT",
"WORK_OF_ART","LAW","LANGUAGE","MISC"]
WHAT_FILTER = LOCATION_FILTER+DATE_FILTER+CAUSE_FILTER+PERSON_FILTER+["WORK_OF_ART","LAW","LANGUAGE"]
QUANTITY_FILTER = ["PERCENT", "MONEY", "QUANTITY", "ORDINAL", "CARDINAL"]
ALL_FILTER = LOCATION_FILTER + PERSON_FILTER + DATE_FILTER + CAUSE_FILTER + WHICH_FILTER + WHAT_FILTER + QUANTITY_FILTER
similarities = []
word_label = get_wd_label(word_id)
is_banned_label = word_label.lower() in banned_labels
if word_label == "" and not is_timestamp(word_id):
return similarities
word_ents = get_kb_ents(word_label)
#print(word_id,word_label,entity_type,[e.label_ for e in word_ents])
#if is_timestamp(word_id):
#print("is_timestamp")
#print("word_ents",word_ents)
if is_timestamp(word_id) and entity_type == "date" and not is_banned_label:
similarities.append(max_reward)
#print("in the condition", word_id, entity_type, similarities)
elif word_ents and not is_banned_label:
for ent in word_ents:
if (entity_type == "all" and ent.label_ in ALL_FILTER):
similarities.append(max_reward)
elif ent.kb_id_ == word_id:
if entity_type == "location" and ent.label_ in LOCATION_FILTER:
similarities.append(max_reward)
elif entity_type == "person" and ent.label_ in PERSON_FILTER:
similarities.append(max_reward)
elif entity_type == "date" and (ent.label_ in DATE_FILTER):
similarities.append(max_reward)
elif entity_type == "cause" and ent.label_ in CAUSE_FILTER:
similarities.append(max_reward)
elif entity_type == "which" and ent.label_ in WHICH_FILTER:
similarities.append(max_reward)
elif entity_type == "what" and ent.label_ in WHAT_FILTER:
similarities.append(max_reward)
elif entity_type == "quantity" and ent.label_ in QUANTITY_FILTER:
similarities.append(max_reward)
else:
similarities.append(get_similarity_by_words(get_nlp(word_label),get_nlp(entity_type)))
else: similarities.append(get_similarity_by_words(get_nlp(word_label),get_nlp(entity_type)))
else:
similarities.append(get_similarity_by_words(get_nlp(word_label),get_nlp(entity_type)))
#print("get_entity_similarity:",word_label, entity_type, similarities)
return similarities
#get_entity_similarity("place of birth", "location", [], max_reward=2)
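# get_entity_similarity() maps the candidate's label to named-entity types via
# get_kb_ents() and returns `max_reward` for every entity whose NER label fits
# the requested type (timestamps count as "date"); otherwise it falls back to
# the word-vector similarity between the label and the type name.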
# In[71]:
def get_hypothesises(nlp, predicates_dict, predicates, themes, paths_keywords, filtered_paths, threshold=0.5, special_pred_theshold=0.7, max_reward=2.0, in_context=False):#, themes, themes_enhanced):
#print(themes) #([(Alan Arkin, ['Q108283'])], [And])
w_positions, w_names = w_converter(nlp)
w_names_only = [wn[1] for wn in w_names]
#print("w_positions",w_positions)
#print("w_names",w_names)
#print("w_names_only",w_names_only)
date_trigger = "date" in w_names_only
#person_trigger = "person" in w_names_only
#print("date_trigger",date_trigger)
#print("person_trigger",person_trigger)
BANNED_PREDICATATE_IDS = ["P31"]
BANNED_PREDICATATE_NAMES = [get_wd_label(bpi) for bpi in BANNED_PREDICATATE_IDS]
#print("BANNED_PREDICATATE_NAMES",BANNED_PREDICATATE_NAMES)
complementary_predicates = paths_keywords[0]+[p[0] for p in list(paths_keywords[1].values())]
nlp_time = get_nlp("time")
nlp_date = get_nlp("date")
#locate positions
anchors_positions = []
anchors_focuses = []
#keywords_positions = []
#predicates_positions = []
theme_keywords = [t[0] for t in themes[0]]
#print("IN HYPOTHESIS: theme_keywords",theme_keywords)
theme_ids = sum([t[1] for t in themes[0]],[])
#print("IN HYPOTHESIS: theme_ids",theme_ids)
if in_context:
filtered_paths_filtered = []
if len(theme_ids)>=2: min_counter = 2
else: min_counter = 1
for fp in filtered_paths:
current_counter = 0
for e in fp:
if e in theme_ids and current_counter<min_counter:
current_counter+=1
if current_counter>=min_counter:
filtered_paths_filtered.append(fp)
break
#print("len(filtered_paths_filtered)",len(filtered_paths_filtered))
filtered_paths = filtered_paths_filtered
predicate_ids = sum([p[1] for p in predicates if p[1]],[])
predicate_names = [get_nlp(p[0].text) for p in predicates]
#print("predicate_ids",predicate_ids)
#print("predicate_names",predicate_names)
if paths_keywords[2]:
[anchors_positions.append(i) for i, w in enumerate(nlp) if w in paths_keywords[2]]
#anchors_childrens
for p in anchors_positions:
children = [c for c in nlp[p].children]
if children == []:
children = [c for c in nlp[p].head.children]
else:
if nlp[p].head:
children.append(nlp[p].head)
anchors_focuses += ([c for c in children
if c not in [nlp[a] for a in anchors_positions]
and c.pos_ != "PUNCT"])
if not anchors_focuses:
anchors_focuses = [nlp[p].head]
anchors_focuses += complementary_predicates
#print("\nanchors_focuses",anchors_focuses)
else:
anchors_positions = w_positions
anchors_focuses = [tk.lower_ for tk in theme_keywords]
anchors_focuses += complementary_predicates
#print("\nanchors_positions:",anchors_positions)
anchors_focuses_filtered = []
for af in anchors_focuses:
if isinstance(af, str):
anchors_focuses_filtered.append(af)
else:
anchors_focuses_filtered.append(af.text)
anchors_focuses = []
[anchors_focuses.append(af) for af in anchors_focuses_filtered if af not in anchors_focuses and af and af not in BANNED_PREDICATATE_NAMES]
#print("\nanchors_focuses",anchors_focuses)
#find anchor position in paths
anchors_predicates = []
main_predicate_ids = []
main_predicate_names = []
[main_predicate_ids.append(p) for p in predicate_ids+sum([p[1] for p in list(paths_keywords[1].values())],[]) if p not in main_predicate_ids]
#print("paths_keywords[1]",paths_keywords[1])
#print("main_predicate_ids",main_predicate_ids)
#print("[p[0] for p in list(paths_keywords[1].values())]",[p[0].text for p in list(paths_keywords[1].values())])
[main_predicate_names.append(p) for p in predicate_names+[get_nlp(p[0].text) for p in list(paths_keywords[1].values())] if p not in main_predicate_names]
#print("paths_keywords[1]",paths_keywords[1])
#print("main_predicate_names",main_predicate_names)
#
#return 0
for p in filtered_paths:
p_len = len(p)
for i_e, e in enumerate(p):
if is_wd_predicate(e):
#print("predicate",e)
if main_predicate_ids:
if e in main_predicate_ids and e not in BANNED_PREDICATATE_IDS:
if e not in [ap[0] for ap in anchors_predicates]:
if date_trigger:
time_similarity = get_similarity_by_words(get_nlp(get_wd_label(e)),nlp_time)
date_similarity = get_similarity_by_words(get_nlp(get_wd_label(e)),nlp_date)
#print("main_predicate_ids", e, "time_similarity",time_similarity)
#print("main_predicate_ids", e, "date_similarity",date_similarity)
if time_similarity > date_similarity:
anchors_predicates.append((e, time_similarity))
else: anchors_predicates.append((e, date_similarity))
else:
anchors_predicates.append((e, max_reward))
elif e not in [ap[0] for ap in anchors_predicates]:
stat_count = 0
stat_current = 0
for pn in main_predicate_names:
stat_current += get_similarity_by_words(get_nlp(get_wd_label(e)),pn)
stat_count += 1
for pi in main_predicate_ids:
stat_current += get_similarity_by_words(get_nlp(get_wd_label(e)),get_nlp(get_wd_label(pi)))
stat_count += 1
if date_trigger:
time_similarity = get_similarity_by_words(get_nlp(get_wd_label(e)),nlp_time)
date_similarity = get_similarity_by_words(get_nlp(get_wd_label(e)),nlp_date)
#print("if main_pred -> date_trigger -> elif e not",e)
#print("time_similarity",time_similarity)
#print("date_similarity",date_similarity)
if time_similarity > special_pred_theshold or date_similarity > special_pred_theshold:
if stat_count > 1:
stat_count -= 1
else: stat_count += 1
if time_similarity > date_similarity:
anchors_predicates.append((e, time_similarity))
else: anchors_predicates.append((e, date_similarity))
anchors_predicates.append((e, stat_current/stat_count))
elif e not in [ap[0] for ap in anchors_predicates]:
stat_count = 0
stat_current = 0
for af in anchors_focuses:
stat_current += get_similarity_by_words(get_nlp(get_wd_label(e)),get_nlp(af))
stat_count += 1
if date_trigger:
time_similarity = get_similarity_by_words(get_nlp(get_wd_label(e)),nlp_time)
date_similarity = get_similarity_by_words(get_nlp(get_wd_label(e)),nlp_date)
#print("if not main_pred -> date_trigger -> elif e not",e)
#print("time_similarity",time_similarity)
#print("date_similarity",date_similarity)
if time_similarity > special_pred_theshold or date_similarity > special_pred_theshold:
if stat_count > 1:
stat_count -= 1
else: stat_count += 1
if time_similarity > date_similarity:
anchors_predicates.append((e, time_similarity))
                            else: anchors_predicates.append((e, date_similarity))
anchors_predicates.append((e, stat_current/stat_count))
#print("filtered_paths",filtered_paths)
#for p in filtered_paths:
# for af in anchors_focuses:
# #print(af, p)
# for e in p:
# #print(af,get_wd_label(e))
# if is_wd_predicate(e):# and e not in [ap[0] for ap in anchors_predicates]:
# #print(af,get_wd_label(e))
# anchors_predicates.append([e, get_similarity_by_words(get_nlp(get_wd_label(e)),get_nlp(af))])
#print("\nanchors_predicates",anchors_predicates)
anchors_predicates_filtered = []
[anchors_predicates_filtered.append(ap) for ap in anchors_predicates if ap not in anchors_predicates_filtered]
#print("\nanchors_predicates_filtered",anchors_predicates_filtered)
anchors_predicates = [a for a in sorted(anchors_predicates_filtered, key=lambda x: x[-1], reverse=True) if a[1] > threshold]
for thres in [e/100 for e in reversed(range(10, int(threshold*100)+10, 10))]:
#print("anchors_predicates current thres",thres)
anchors_predicates = [a for a in sorted(anchors_predicates_filtered, key=lambda x: x[-1], reverse=True) if a[1] > thres]
if anchors_predicates:
break
#print("len(anchors_predicates sorted)",len(anchors_predicates))
#print("anchors_predicates sorted",anchors_predicates)
#anchors_predicates_filtered = []
#for ap in anchors_predicates:
# for af in anchors_focuses:
# anchors_predicates_filtered.append([ap[0],get_similarity_by_words(get_nlp(get_wd_label(ap[0])),get_nlp(af))])
#
#anchors_predicates_filtered = [a for a in sorted(anchors_predicates_filtered, key=lambda x: x[-1], reverse=True) if a[1] > 0]
#for thres in [e/100 for e in reversed(range(10, int(threshold*100)+10, 10))]:
# print("anchors_predicates_filtered current thres",thres)
# if not anchors_predicates_filtered:
# anchors_predicates_filtered = anchors_predicates
# break
# anchors_predicates_filtered = [a for a in sorted(anchors_predicates_filtered, key=lambda x: x[-1], reverse=True) if a[1] > thres]
# if len(anchors_predicates) > 10:
# break
#print("len(anchors_predicates_filtered)",len(anchors_predicates_filtered))
#print("anchors_predicates_filtered",anchors_predicates_filtered)
#
#anchors_predicates=[]
#[anchors_predicates.append(apf) for apf in anchors_predicates_filtered if apf not in anchors_predicates]
#print("len(anchors_predicates)",len(anchors_predicates))
#print("anchors_predicates",anchors_predicates)
tuples_unique_ids = []
tuples_unique_predicate_ids = []
hypothesises_tuples = []
for ap in anchors_predicates:
#print("ap",ap)
for fp in filtered_paths:
#if "Q4985" in fp:
# print("Q4985 in fp",fp, ap)
for i, e in enumerate(fp):
#print(e)
if e == ap[0] and i>1 and i<len(fp)-1:
#print(i, [fp[i-1], fp[i], fp[i+1]])
hypothesis_tuple = [fp[i-1], fp[i], fp[i+1]]
if hypothesis_tuple not in hypothesises_tuples:
hypothesises_tuples.append(hypothesis_tuple)
if hypothesis_tuple[0] not in tuples_unique_ids:
tuples_unique_ids.append(hypothesis_tuple[0])
if hypothesis_tuple[1] not in tuples_unique_predicate_ids:
tuples_unique_predicate_ids.append(hypothesis_tuple[1])
if hypothesis_tuple[2] not in tuples_unique_ids:
tuples_unique_ids.append(hypothesis_tuple[2])
#if "Q4985" in hypothesis_tuple:
# print("Q4985 hypothesis_tuple",hypothesis_tuple, ap,fp)
#print("tuples_unique_ids",tuples_unique_ids)
#print("tuples_unique_predicate_ids",tuples_unique_predicate_ids)
hypothesises_unique_ids = [t for t in tuples_unique_ids if get_wd_label(t).lower() not in anchors_focuses]
if len(hypothesises_unique_ids)>0 and len(tuples_unique_ids)>0:
max_reward *= len(hypothesises_unique_ids)/len(tuples_unique_ids)
#print("hypothesises_unique_ids",hypothesises_unique_ids)
#print("hypothesises_tuples",hypothesises_tuples)
#print("hypothesises_tuples",hypothesises_tuples)
#print([a[0] for a in anchors_predicates])
#keywords_ids = [i for j in [get_wd_ids(k) for k in anchors_focuses if get_wd_ids(k)] for i in j]
#print("anchors_focuses",keywords_ids)
#print(extract_ids(themes[0]))
#print(extract_ids(themes_enhanced))
#keywords_ids = []
#[keywords_ids.append(i) for i in extract_ids(themes[0]) + extract_ids(themes_enhanced) if i not in keywords_ids]
#print("keywords_ids",keywords_ids)
#print("anchors_predicates",anchors_predicates)
#print("-------START FILTERING-------")
hypothesises = []
hypothesises_all = []
hypothesises_tuples_len = len(hypothesises_tuples)
keywords_similarity_threshold = 0.9
    tmp_to_find = "" # debugging: set this to an entity ID to trace it through the prints below
for ht in hypothesises_tuples:
if tmp_to_find in ht: print("ht",ht)
if ht[1] in [a[0] for a in anchors_predicates]:
for i_af, af in enumerate(anchors_focuses):
if tmp_to_find in ht: print("HT -> step 0")
hypo_sum = 0
nlp_af = get_nlp(af)
nlp_ht0 = get_nlp(get_wd_label(ht[0]))
nlp_ht2 = get_nlp(get_wd_label(ht[2]))
if not nlp_ht2:
break
if tmp_to_find in ht: print("HT -> step 1")
af_lemma = ' '.join([e.lower_ for e in nlp_af if e.pos_ != "DET"])
ht0_lemma = ' '.join([e.lower_ for e in nlp_ht0 if e.pos_ != "DET"])
ht2_lemma = ' '.join([e.lower_ for e in nlp_ht2 if e.pos_ != "DET"])
#if get_wd_label(ht[0]).lower() not in anchors_focuses and get_wd_label(ht[2]).lower() not in anchors_focuses:
# for es in get_entity_similarity(ht[0], wn[1], anchors_focuses, max_reward=max_reward):
# hypo_sum += es
if (
nlp_af.text.lower() != nlp_ht2.text.lower()
and af_lemma != nlp_ht2[0].text.lower()
and nlp_af.text.lower() != ht2_lemma
and af_lemma != ht2_lemma
):
if tmp_to_find in ht: print("HT -> step 2")
if date_trigger:
if is_timestamp(ht[0]):
for es in get_entity_similarity(ht[0], "date", anchors_focuses, max_reward=max_reward):
hypo_sum += es
#print("if date hypo_sum",ht[0], "date",ht[0], es, hypo_sum)
else: hypo_sum += get_similarity_by_words(nlp_ht2, nlp_af)
else: hypo_sum += get_similarity_by_words(nlp_ht2, nlp_af)
if i_af in w_positions:
for wn in w_names:
if i_af == wn[0]:
for es in get_entity_similarity(ht[0], wn[1], anchors_focuses, max_reward=max_reward):
hypo_sum += es
if tmp_to_find in ht: print("if i_af hypo_sum","ht[0], wn[1], es",ht[0], wn[1], es,hypo_sum)
ht0_sum = 0
ht2_sum = 0
if is_timestamp(ht[0]): ht0_label = ht[0]
else: ht0_label = get_wd_label(ht[0]).lower()
if is_timestamp(ht[2]): ht2_label = ht[2]
else: ht2_label = get_wd_label(ht[2]).lower()
for tk in theme_keywords:
if tmp_to_find in ht: print("HT -> step 3")
if tmp_to_find in ht: print("tk",tk)
if tmp_to_find in ht: print("ht0_label",ht0_label)
if tmp_to_find in ht: print("ht2_label",ht2_label)
nlp_tk = get_nlp(tk.text.lower())
ht0_label_similarity = get_nlp(ht0_label).similarity(nlp_tk)
ht2_label_similarity = get_nlp(ht2_label).similarity(nlp_tk)
if tmp_to_find in ht: print("ht0_label_similarity",ht0_label_similarity)
if tmp_to_find in ht: print("ht2_label_similarity",ht2_label_similarity)
#
if ht0_label_similarity > keywords_similarity_threshold and ht[1] in main_predicate_ids:
if tmp_to_find in ht: print("ht0_label",ht0_label)
for wn in w_names_only:
for es in get_entity_similarity(ht[2], wn, anchors_focuses, max_reward=max_reward*3):
if tmp_to_find in ht: print("ht0_sum main_predicate_ids before",ht0_sum)
ht0_sum += es
if tmp_to_find in ht: print("theme_keywords ht0_sum ht[2], wn, es",ht[2], wn, es, ht0_sum)
if tmp_to_find in ht: print("ht0_label",ht2_label,es, ht0_sum, ht)
elif ht0_label_similarity > keywords_similarity_threshold:
if tmp_to_find in ht: print("ht0_label",ht0_label)
for wn in w_names_only:
for es in get_entity_similarity(ht[2], wn, anchors_focuses, max_reward=max_reward*2):
if tmp_to_find in ht: print("ht0_sum before",ht0_sum)
ht0_sum += es
if tmp_to_find in ht: print("theme_keywords not main_predicate_ids ht0_sum ht[2], wn, es",ht[2], wn, es, ht0_sum)
if tmp_to_find in ht: print("ht0_label",ht2_label,es, ht0_sum, ht)
#
if ht2_label_similarity > keywords_similarity_threshold and ht[1] in main_predicate_ids:
if tmp_to_find in ht: print("ht2_label",ht2_label)
for wn in w_names_only:
for es in get_entity_similarity(ht[0], wn, anchors_focuses, max_reward=max_reward*3):
if tmp_to_find in ht: print("ht2_sum before",ht0_sum)
ht2_sum += es
if tmp_to_find in ht: print("theme_keywords main_predicate_ids ht2_sum ht[0], wn, es",ht[0], wn, es, ht2_sum)
if tmp_to_find in ht: print("ht2_label",ht0_label,es, ht2_sum, ht)
elif ht2_label_similarity > keywords_similarity_threshold:
if tmp_to_find in ht: print("ht2_label",ht2_label)
for wn in w_names_only:
for es in get_entity_similarity(ht[0], wn, anchors_focuses, max_reward=max_reward*2):
if tmp_to_find in ht: print("ht2_sum before",ht0_sum)
ht2_sum += es
if tmp_to_find in ht: print("theme_keywords not main_predicate_ids ht2_sum ht[0], wn, es",ht[0], wn, es, ht2_sum)
if tmp_to_find in ht: print("ht2_label",ht0_label,es, ht2_sum, ht)
for ap in anchors_predicates:
if ap[0] == ht[1]:
for wn in w_names_only:
for es in get_entity_similarity(ht[0], wn, anchors_focuses, max_reward=max_reward*2):
ht0_sum += es
if tmp_to_find in ht: print("anchors_predicates w_names_only ht0_sum ht[0], wn, es",ht[0], wn, es, ht0_sum)
for es in get_entity_similarity(ht[2], wn, anchors_focuses, max_reward=max_reward*2):
ht2_sum += es
if tmp_to_find in ht: print("anchors_predicates w_names_only ht2_sum ht[2], wn, es",ht[2], wn, es, ht2_sum)
for tk in theme_keywords:
if tmp_to_find in ht: print("anchors_predicates tk",tk)
if tmp_to_find in ht: print("anchors_predicates ht0_label",ht0_label)
if tmp_to_find in ht: print("anchors_predicates ht2_label",ht2_label)
nlp_tk = get_nlp(tk.text.lower())
ht0_label_similarity = get_nlp(ht0_label).similarity(nlp_tk)
ht2_label_similarity = get_nlp(ht2_label).similarity(nlp_tk)
if tmp_to_find in ht: print("anchors_predicates ht0_label_similarity",ht0_label_similarity)
if tmp_to_find in ht: print("anchors_predicates ht2_label_similarity",ht2_label_similarity)
if ht0_label_similarity > keywords_similarity_threshold and ht[1] in main_predicate_ids:
if tmp_to_find in ht: print("anchors_predicates ht0_label",ht0_label)
for wn in w_names_only:
for es in get_entity_similarity(ht[2], wn, anchors_focuses, max_reward=max_reward*2):
if tmp_to_find in ht: print("anchors_predicates ht0_sum main_predicate_ids before",ht0_sum)
ht0_sum += es
if tmp_to_find in ht: print("anchors_predicates theme_keywords ht0_sum ht[2], wn, es",ht[2], wn, es, ht0_sum)
if tmp_to_find in ht: print("anchors_predicates ht0_label",ht2_label,es, ht0_sum, ht)
elif ht0_label_similarity > keywords_similarity_threshold:
if tmp_to_find in ht: print("anchors_predicates ht0_label",ht0_label)
for wn in w_names_only:
for es in get_entity_similarity(ht[2], wn, anchors_focuses, max_reward=max_reward):
if tmp_to_find in ht: print("anchors_predicates ht0_sum before",ht0_sum)
ht0_sum += es
if tmp_to_find in ht: print("anchors_predicates theme_keywords not main_predicate_ids ht0_sum ht[2], wn, es",ht[2], wn, es, ht0_sum)
if tmp_to_find in ht: print("anchors_predicates ht0_label",ht2_label,es, ht0_sum, ht)
if ht2_label_similarity > keywords_similarity_threshold and ht[1] in main_predicate_ids:
if tmp_to_find in ht: print("anchors_predicates ht2_label",ht2_label)
for wn in w_names_only:
for es in get_entity_similarity(ht[0], wn, anchors_focuses, max_reward=max_reward*2):
if tmp_to_find in ht: print("anchors_predicates ht2_sum before",ht0_sum)
ht2_sum += es
if tmp_to_find in ht: print("anchors_predicates theme_keywords main_predicate_ids ht2_sum ht[0], wn, es",ht[0], wn, es, ht2_sum)
if tmp_to_find in ht: print("anchors_predicates ht2_label",ht0_label,es, ht2_sum, ht)
elif ht2_label_similarity > keywords_similarity_threshold:
if tmp_to_find in ht: print("anchors_predicates ht2_label",ht2_label)
for wn in w_names_only:
for es in get_entity_similarity(ht[0], wn, anchors_focuses, max_reward=max_reward):
if tmp_to_find in ht: print("anchors_predicates ht2_sum before",ht0_sum)
ht2_sum += es
if tmp_to_find in ht: print("anchors_predicates theme_keywords not main_predicate_ids ht2_sum ht[0], wn, es",ht[0], wn, es, ht2_sum)
if tmp_to_find in ht: print("anchors_predicates ht2_label",ht0_label,es, ht2_sum, ht)
if date_trigger and is_timestamp(ht0_label) and ht2_label in anchors_focuses:
hypo_sum += ht0_sum
#print("is_timestamp(ht0_label) hypo_sum", hypo_sum)
elif date_trigger and is_timestamp(ht2_label) and ht0_label in anchors_focuses:
hypo_sum += ht2_sum
#print("is_timestamp(ht2_label) hypo_sum", hypo_sum)
elif ht2_label in anchors_focuses and ht0_label not in anchors_focuses:
hypo_sum += ht2_sum
if tmp_to_find in ht: print("ht2_label hypo_sum in anchors_focuses", hypo_sum)
elif ht0_label in anchors_focuses and ht2_label not in anchors_focuses:
hypo_sum += ht0_sum
if tmp_to_find in ht: print("ht0_label hypo_sum in anchors_focuses", hypo_sum)
else:
hypo_sum += ht0_sum
hypo_sum += ht2_sum
if tmp_to_find in ht: print("else in anchors_focuses hypo_sum", hypo_sum)
if tmp_to_find in ht: print("hypo_sum",hypo_sum)
if tmp_to_find in ht: print("ap[1]",ap[1])
hypo_sum *= ap[1]
if tmp_to_find in ht: print("ht[0], ht[2], hypo_sum",ht[0], ht[2], hypo_sum)
#if get_wd_label(ht[0]).lower() in anchors_focuses:
# if not i_af in w_positions:
# hypo_sum += abs(ap[1])
# else: hypo_sum -= abs(ap[1])
#if ht[0] == "Q202725": print("hypo_sum",hypo_sum)
#else: hypo_sum = ap[1]
#hypo_sum *= abs(ap[1])
#break
#print("ap",ap, "ht",ht, "hypo_sum",hypo_sum)
#print(ht)
#break
#hypo_sum = abs(hypo_sum)
#hypo_sum += abs(ap[1])
#hypo_sum += abs(ap[1])
#hypo_sum += ap[1]
#hypo_sum += abs(hypo_sum)
#hypo_sum *= abs(ap[1])
#hypo_sum = abs(hypo_sum)
#hypo_sum /= ap[1]
#hypo_sum -= ap[1]
#hypo_sum += hypo_sum/ap[1]
#print("ht[0]",ht[0])
#print("ht[2]",ht[2])
if (date_trigger and is_timestamp(ht[0])
and not is_timestamp(ht[2])
and is_in_list_by_similarity(get_wd_label(ht[2]).lower(), anchors_focuses, keywords_similarity_threshold)):
#print("is_timestamp(ht[0]")
hypo = ht[0]
elif (date_trigger and is_timestamp(ht[2])
and not is_timestamp(ht[0])
and is_in_list_by_similarity(get_wd_label(ht[0]).lower(), anchors_focuses, keywords_similarity_threshold)):
#print("is_timestamp(ht[2]")
hypo = ht[2]
elif date_trigger and is_timestamp(ht[0]) and is_timestamp(ht[2]): break
#is_in_list_by_similarity("Moby Dick", ["moby-dick","star wars"],0.9)
#elif get_wd_label(ht[0]).lower() in anchors_focuses:
# #print("get_wd_label(ht[0]).lower()",get_wd_label(ht[0]).lower())
# if not get_wd_label(ht[2]).lower() in anchors_focuses:
# hypo = ht[2]
# if get_wd_label(ht[2]).lower() in anchors_focuses:
# break
elif is_in_list_by_similarity(get_wd_label(ht[0]).lower(), anchors_focuses, keywords_similarity_threshold):
if not is_in_list_by_similarity(get_wd_label(ht[2]).lower(), anchors_focuses, keywords_similarity_threshold):
hypo = ht[2]
else: break
elif not is_in_list_by_similarity(get_wd_label(ht[0]).lower(), anchors_focuses, keywords_similarity_threshold):
if is_in_list_by_similarity(get_wd_label(ht[2]).lower(), anchors_focuses, keywords_similarity_threshold):
hypo = ht[0]
else:
hypothesises_all.append(ht[0])
if not hypothesises: hypothesises.append([ht[0], hypo_sum])
else:
if ht[0] in [h[0] for h in hypothesises]:
for i, h in enumerate(hypothesises):
if ht[0] == h[0]: hypothesises[i] = [ht[0], hypo_sum+hypothesises[i][1]]
else: hypothesises.append([ht[0], hypo_sum])
hypo = ht[2]
#elif not get_wd_label(ht[0]).lower() in anchors_focuses:
# if get_wd_label(ht[2]).lower() in anchors_focuses:
# hypo = ht[0]
# if not get_wd_label(ht[2]).lower() in anchors_focuses:
# hypothesises_all.append(ht[0])
# if not hypothesises: hypothesises.append([ht[0], hypo_sum])
# else:
# if ht[0] in [h[0] for h in hypothesises]:
# for i, h in enumerate(hypothesises):
# if ht[0] == h[0]: hypothesises[i] = [ht[0], hypo_sum+hypothesises[i][1]]
# else: hypothesises.append([ht[0], hypo_sum])
#
# #if "Q4985" in ht: print("Q4985 ALONE hypo and sum:", ht[0], hypo_sum)
# hypo = ht[2]
else:
#print("BREAK", ht)
break
#print("hypothesises",hypothesises)
#if "Q4985" in ht:
# print("Q4985 hypo and sum:", hypo, hypo_sum)
hypothesises_all.append(hypo)
if not hypothesises: hypothesises.append([hypo, hypo_sum])
else:
if hypo in [h[0] for h in hypothesises]:
for i, h in enumerate(hypothesises):
if hypo == h[0]: hypothesises[i] = [hypo, hypo_sum+hypothesises[i][1]]
else: hypothesises.append([hypo, hypo_sum])
#print("len(hypothesises_all)",len(hypothesises_all))
for i_h, h in enumerate(hypothesises):
h_sum = hypothesises_all.count(h[0])
#print("h_sum",h_sum)
#print("BEFORE: hypothesises[i_h][1]",hypothesises[i_h][1])
hypothesises[i_h][1] = hypothesises[i_h][1]/h_sum
#print("AFTER: hypothesises[i_h][1]",hypothesises[i_h][1])
#print("hypothesises_all",hypothesises_all)
hypothesises = sorted(hypothesises, key=lambda x: x[-1], reverse=True)
return hypothesises
#if verbose: print("-> Computing hypothesises...")
#hypothesises = get_hypothesises(q_nlp, q_predicates, q_themes, paths_keywords, paths_nodes_filtered, threshold=0.5, max_reward=2.0)
#if verbose: print("\n\n--> hypothesises:",hypothesises)
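# Overview: get_hypothesises() converts the question's wh-word into a target
# entity type (w_converter), scores every predicate of the filtered paths
# against the question's keywords and predicates (anchors_predicates), extracts
# (subject, predicate, object) tuples around those predicates, rewards tuple
# ends that match the target type and the theme keywords, and returns
# [entity_id, score] pairs sorted by decreasing score.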
# In[72]:
def get_unique_hypo_paths(hypothesis, other_hypothesis, hypo_paths):
BANNED_IDS = ["P31"]
filtered_hypo_paths = []
other_hypothesis = other_hypothesis[:]+[hypothesis]
for hp in hypo_paths:
#print("hp",hp)
path_is_used = False
len_hp = len(hp)
if len_hp >= 3:
#if "P31" in hp: print("hp",hp)
for e in hp:
if e in other_hypothesis:
e_index = hp.index(e)
for step_index in range(1, len_hp, 2): #len_hp-e_index
#print("step_index",step_index)
if e_index-step_index-2 >= 0:
if hp[e_index-step_index] == hp[e_index-step_index-2] and hp[e_index-step_index] not in BANNED_IDS:
#print(hp.index(e), hp)
#print("IN")
part_1 = hp[:e_index-step_index-0]
part_2 = hp[e_index-step_index-1:]
#print("hp[:",e_index-step_index-0,"]",part_1)
#print("hp[",e_index-step_index-1,":]",part_2)
#hp[e_index:]
sub_part_1 = None
sub_part_2 = None
if hypothesis in part_1:
sub_part_1 = get_unique_hypo_paths(hypothesis, other_hypothesis, [part_1,[]])
if hypothesis in part_2:
sub_part_2 = get_unique_hypo_paths(hypothesis, other_hypothesis, [part_2,[]])
if sub_part_1 != None:
if sub_part_1:
[filtered_hypo_paths.append(sp) for sp in sub_part_1]
else:
                                        flag_too_much = False
                                        for e in hp:
                                            if hp.count(e) > 1 and e not in BANNED_IDS: flag_too_much=True
                                        if not flag_too_much: filtered_hypo_paths.append(part_1)
if sub_part_2 != None:
if sub_part_2:
[filtered_hypo_paths.append(sp) for sp in sub_part_2]
else:
                                        flag_too_much = False
                                        for e in hp:
                                            if hp.count(e) > 1 and e not in BANNED_IDS: flag_too_much=True
                                        if not flag_too_much: filtered_hypo_paths.append(part_2)
path_is_used=True
else: break
for step_index in range(1, len_hp, 2):
#print("step_index",step_index)
if e_index+step_index+2 < len_hp:
if hp[e_index+step_index] == hp[e_index+step_index+2] and hp[e_index+step_index] not in BANNED_IDS:
#print(hp.index(e), hp)
part_1 = hp[:e_index+step_index+2]
part_2 = hp[e_index+step_index+1:]
#print("hp[:",e_index+step_index+2,"]",part_1)
#print("hp[",e_index+step_index+1,":]",part_2)
#print("part_1",part_1)
#print("part_2",part_2)
sub_part_1 = None
sub_part_2 = None
if hypothesis in part_1:
sub_part_1 = get_unique_hypo_paths(hypothesis, other_hypothesis, [part_1,[]])
if hypothesis in part_2:
sub_part_2 = get_unique_hypo_paths(hypothesis, other_hypothesis, [part_2,[]])
if sub_part_1 != None:
if sub_part_1:
[filtered_hypo_paths.append(sp) for sp in sub_part_1]
else:
                                        flag_too_much = False
                                        for e in hp:
                                            if hp.count(e) > 1 and e not in BANNED_IDS: flag_too_much=True
                                        if not flag_too_much: filtered_hypo_paths.append(part_1)
if sub_part_2 != None:
if sub_part_2:
[filtered_hypo_paths.append(sp) for sp in sub_part_2]
else:
flag_too_much = False
for e in hp:
if hp.count(e) > 1 and e not in BANNED_IDS: flag_too_much=True
if not flag_too_much: filtered_hypo_paths.append(part_2)
path_is_used=True
else: break
if path_is_used == False:
flag_too_much = False
for e in hp:
if hp.count(e) > 1 and e not in BANNED_IDS: flag_too_much=True
if not flag_too_much: filtered_hypo_paths.append(hp)
#else:
# filtered_hypo_paths.append(hp)
return filtered_hypo_paths
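# Roughly, get_unique_hypo_paths() keeps the portions of each candidate path
# that are specific to `hypothesis`: whenever a path runs through another
# hypothesis and repeats a predicate hop, it is split there and only the
# fragments still containing the hypothesis survive; fragments that reuse an
# ID outside BANNED_IDS are dropped.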
# In[73]:
def match_hypothesises_worker(in_mp_queue, out_mp_queue):
golden_paths = []
sentinel = None
for mpu in iter(in_mp_queue.get, sentinel):
#for mp in mp_similarities_untagged:
#print("AFTER mpu",mpu)
for i_e, e in enumerate(mpu[1]):
if i_e <= 1 or i_e >= len(mpu[1])-2:
continue
if not is_wd_entity(e):
continue
mp_e_statements = get_all_statements_of_entity(e)
mp_predicate_tagging_index = mpu[1][i_e+1].find("-")
if mp_predicate_tagging_index != -1:
mp_predicate = mpu[1][i_e+1][:mp_predicate_tagging_index]
else:
mp_predicate = mpu[1][i_e+1]
extended_paths = get_statements_by_id(mp_e_statements, e, mp_predicate, qualifier=False, statement_type="predicate")
extended_paths_qualifier = get_statements_by_id(mp_e_statements, e, mp_predicate, qualifier=True, statement_type="qualifier_predicate")
ep_predicate_tagging_index_plus_1 = mpu[1][i_e+1].find("-")
if ep_predicate_tagging_index_plus_1 != -1:
ep_predicate_plus_1 = mpu[1][i_e+1][:ep_predicate_tagging_index_plus_1]
else:
ep_predicate_plus_1 = mpu[1][i_e+1]
ep_predicate_tagging_index_minus_1 = mpu[1][i_e-1].find("-")
if ep_predicate_tagging_index_minus_1 != -1:
ep_predicate_minus_1 = mpu[1][i_e-1][:ep_predicate_tagging_index_minus_1]
else:
ep_predicate_minus_1 = mpu[1][i_e-1]
for ep in extended_paths_qualifier:
if (ep['entity']['id'] == mpu[1][i_e] and
ep['predicate']['id'] == ep_predicate_minus_1 and
ep['object']['id'] == mpu[1][i_e-2] and
ep['qualifiers']):
for q in ep['qualifiers']:
if(q['qualifier_predicate']["id"] == ep_predicate_plus_1 and
q['qualifier_object']["id"] == mpu[1][i_e+2]):
if mpu[1] not in golden_paths:
golden_paths.append(mpu[1])
if (ep['entity']['id'] == mpu[1][i_e+2] and
ep['predicate']['id'] == ep_predicate_plus_1 and
ep['object']['id'] == mpu[1][i_e] and
ep['qualifiers']):
for q in ep['qualifiers']:
if(q['qualifier_predicate']["id"] == ep_predicate_minus_1 and
q['qualifier_object']["id"] == mpu[1][i_e-2]):
if mpu[1] not in golden_paths:
golden_paths.append(mpu[1])
for ep in extended_paths:
if (ep['entity']['id'] == mpu[1][i_e] and
ep['predicate']['id'] == ep_predicate_minus_1 and
ep['object']['id'] == mpu[1][i_e-2] and
ep['qualifiers']):
for q in ep['qualifiers']:
if(q['qualifier_predicate']["id"] == ep_predicate_plus_1 and
q['qualifier_object']["id"] == mpu[1][i_e+2]):
if mpu[1] not in golden_paths:
golden_paths.append(mpu[1])
if (ep['entity']['id'] == mpu[1][i_e+2] and
ep['predicate']['id'] == ep_predicate_plus_1 and
ep['object']['id'] == mpu[1][i_e] and
ep['qualifiers']):
for q in ep['qualifiers']:
if(q['qualifier_predicate']["id"] == ep_predicate_minus_1 and
q['qualifier_object']["id"] == mpu[1][i_e-2]):
if mpu[1] not in golden_paths:
golden_paths.append(mpu[1])
out_mp_queue.put(golden_paths)
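# match_hypothesises_worker() verifies candidate paths against the knowledge
# base: for each inner entity of a path it fetches the entity's statements
# (get_all_statements_of_entity) and keeps the path only if the surrounding
# (entity, predicate, object) hop plus its qualifier hop actually exist, either
# as a main statement or as a qualifier; the verified paths are pushed to
# `out_mp_queue` once the worker reaches the None sentinel.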
# In[74]:
def count_hops(best_path,question):
count_hops = []
[count_hops.append(e) for e in best_path if get_wd_label(e).lower() in question.lower() and not is_wd_predicate(e) and get_wd_label(e).lower() not in [get_wd_label(w).lower() for w in count_hops]]
if len(count_hops)>0:
spo_k = len(count_hops)
else: spo_k=1
return spo_k
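# Illustrative example: for "Who is the wife of Barack Obama?" and a best path
# containing the entity labelled "Barack Obama", count_hops() should return 1,
# since exactly one non-predicate path element is spelled out in the question.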
# In[75]:
# TODO / REDO
# is_wd_entity does not handle timestamps
def list_by_n(l, i):
list_n = []
for j in range(0, len(l)+1, 1):
tmp = l[j-i:i+j-i]
if tmp:
list_n.append(tmp)
return list_n
def match_hypothesises(graph, question, themes, themes_enchanced, focused_parts, predicates, hypothesises, paths, threshold=0.8, max_reward=2.0, winner_threshold_diff=7, time_sensitive=False, cores=mp.cpu_count(), deep_match=4):
BANNED_IDS = ["P31"]
LOCATION_FILTER = ["GPE", "FAC", "LOC","PERSON"]
filtered_paths = []
#print("hypothesises",hypothesises)
if time_sensitive:
hypothesises_time = [h for h in hypothesises if is_timestamp(h[0])]
if hypothesises_time:
hypothesises = hypothesises_time
#if 'where' in [t.lower_ for t in question if t.tag_=="WRB"]:
# print("in where condition")
# hypothesises_location = []
# for h in hypothesises:
# word_label = get_wd_label(h[0])
# word_ents = get_kb_ents(word_label)
# entities = [e.label_ for e in word_ents]
# [hypothesises_location.append(e) for e in entities if e in LOCATION_FILTER]
# print(h[0],word_label,word_ents,entities)
#
# if hypothesises_location:
# hypothesises = hypothesises_location
#print(word_id,word_label,entity_type,)
#print("hypothesises",hypothesises)
for h in hypothesises:
other_hypothesis = [e[0] for e in hypothesises if e != h]
hypo_paths = [p for p in paths if h[0] in p]
filtered_hypo_paths = []
[filtered_hypo_paths.append(p) for p in get_unique_hypo_paths(h[0], other_hypothesis, hypo_paths) if p not in filtered_hypo_paths]
for p in filtered_hypo_paths:
            # skip paths that reuse an element (other than the banned helper predicates)
            if any(p.count(e) > 1 and e not in BANNED_IDS for e in p):
                continue
if p[-1] == h[0]:
reversed_path = list(reversed(p))
if reversed_path not in filtered_paths:
filtered_paths.append(reversed_path)
else:
if p not in filtered_paths:
filtered_paths.append(p)
#print("filtered_paths",filtered_paths)
#print("1 hypothesises",hypothesises)
# check if first hypothesis is clear winner
    winner_threshold_diff = 2*max_reward # hard-coded: overrides the winner_threshold_diff argument (TBD)
first_is_winner = False
if len(hypothesises) > 1:
hypo_diff = hypothesises[0][1]-hypothesises[1][1]
#print("hypo_diff",hypo_diff)
if hypo_diff > winner_threshold_diff:
first_is_winner = True
#print("first_is_winner",first_is_winner)
w_positions, w_names = w_converter(question)
sorted_golden_paths = []
if not sorted_golden_paths:
for p in filtered_paths:
if len(p)>1 and p[0] == hypothesises[0][0]:
if p not in sorted_golden_paths:
sorted_golden_paths.append(p)
if len(p)>1 and p[-1] == hypothesises[0][0]:
p = list(reversed(p))
if p not in sorted_golden_paths:
sorted_golden_paths.append(p)
if not sorted_golden_paths:
for p in filtered_paths:
if len(p)>1 and hypothesises[0][0] in p:
if p not in sorted_golden_paths:
sorted_golden_paths.append(p)
meaningful_paths = []
theme_ids = sum([t[1] for t in themes[0]],[])
#print("1 theme_ids",theme_ids)
theme_ids += sum([t[1] for t in themes[0]],[])
#print("2 theme_ids",theme_ids)
theme_ids += sum([t[1] for t in themes[0]],[])
#print("3 theme_ids",theme_ids)
theme_ids_filtered = []
[theme_ids_filtered.append(ti) for ti in theme_ids if ti not in theme_ids_filtered]
theme_ids = theme_ids_filtered
del theme_ids_filtered
#print("FINAL theme_ids",theme_ids)
#print("themes_enchanced",themes_enchanced) #[('profession', ['Q28640'])]
#print("focused_parts",focused_parts) #[(music, ['Q19820041', 'Q12800119'])]
for p in filtered_paths:
counter = 0
themes_counter=0
for ti in theme_ids:
if ti in p and p not in meaningful_paths:
themes_counter+=1
if themes_counter<2:
counter += max_reward*2
for pred in [p[1] for p in predicates]:
for e in pred:
if e in p:
counter += max_reward
else:
counter -= max_reward
for i_wp, wp in enumerate(w_positions):
if w_names[i_wp][1] and wp<len(p):
for es in get_entity_similarity(p[wp], w_names[i_wp][1], [], max_reward=max_reward):
counter += es
#print("p[wp], w_names[i_wp][1], es",p[wp], w_names[i_wp][1], es)
counter_hypos = 0
for hypo in hypothesises:
for wp in w_positions:
if hypo and wp < len(p):
if hypo[0] == p[wp]:
counter += max_reward
if hypo[0] in p:
counter_hypos+=1
#print("hypos in p",counter, counter_hypos,p)
#if counter_hypos>0: counter = counter / counter_hypos * max_reward
#if hypo[0] in p:
# counter += 1
#if hypo[0] == p[0]:
# counter += 1
#if hypo[0] == p[-1]:
# counter += 0
if counter > 0: meaningful_paths.append((counter, p))
#print("before len(meaningful_paths):",len(meaningful_paths))
meaningful_paths = [mp for mp in meaningful_paths if len(mp[1])>2]
#print("after len(meaningful_paths):",len(meaningful_paths))
#for mpc in meaningful_paths.copy():
# reversed_mp = list(reversed(mpc[1]))
# #print(mp[1],reversed_mp)
# if reversed_mp not in [p[1] for p in meaningful_paths]:
# if time_sensitive:
# if is_timestamp(reversed_mp[0]):
# #print("time_sensitive added")
# #print(mpc[1],reversed_mp)
# meaningful_paths.append([mpc[0],reversed_mp])
# else:
# #print("added")
# #print(mpc[1],reversed_mp)
# meaningful_paths.append([mpc[0],reversed_mp])
#print("meaningful_paths",meaningful_paths)
#print("after after len(meaningful_paths):",len(meaningful_paths))
meaningful_paths = sorted(meaningful_paths, key=lambda x: x[0], reverse=True)
#top_count = list(set([int(mp[0]) for mp in meaningful_paths]))[::-1]
#print("top_count",top_count)
#meaningful_paths = [mp for mp in meaningful_paths if int(mp[0]) in top_count[:int(max_reward*2)]]
#[print(mps) for mps in meaningful_paths if "Q30060373" in mps[1]]
#print("\n")
#looped_paths = []
#for hypo in hypothesises:
# for mp in meaningful_paths:
# if mp[1][0] == hypo[0] or mp[1][-1] == hypo[0]:
# if graph.has_node(mp[1][0]) and graph.has_node(mp[1][-1]):
# path_tmp = list(nx.all_simple_paths(graph, mp[1][0],mp[1][-1]))
# if len(path_tmp)>1:
# for p in path_tmp:
# if p not in [lp[1] for lp in looped_paths]:
# looped_paths.append((mp[0],p))
# #else:
# # if not graph.has_node(mp[1][0]):
# # print("MISSING NODE:", mp[1][0], get_wd_label(mp[1][0]))
# # if not graph.has_node(mp[1][-1]):
# # print("MISSING NODE:", mp[1][-1], get_wd_label(mp[1][-1]))
#
##print("len(looped_paths)", len(looped_paths))
#print("looped_paths",looped_paths)
#looped_paths_untagged = []
#for lp in looped_paths:
# row_tmp = []
# for w in lp[1]:
# if w.find("-") > 0:
# row_tmp.append(w[:w.find("-")])
# else:
# row_tmp.append(w)
# looped_paths_untagged.append((lp[0],row_tmp))
#
#
#
#print("looped_paths_untagged",looped_paths_untagged)
looped_paths_untagged = meaningful_paths
mp_similarities_untagged = []
#mp_similarities_tagged = []
mp_similarities_untagged_hypo = []
#mp_similarities_tagged_hypo = []
who_like = ["who", "whom"]
question_enhanced = []
for q in question:
if q.lemma_ == "where": question_enhanced.append("location")
elif q.lemma_ == "when": question_enhanced.append("date")
elif q.lemma_ in who_like: question_enhanced.append("person")
elif q.lemma_ == "why": question_enhanced.append("cause")
else: question_enhanced.append(q.text)
question_enhanced = nlp(" ".join([q for q in question_enhanced]))
#print("question",question)
#print("question_enhanced",question_enhanced)
#print("[h[0] for h in hypothesises]",[h[0] for h in hypothesises])
for i_lp, lp in enumerate(looped_paths_untagged):
#print(lp)
sentence = get_nlp(" ".join([get_wd_label(w) for w in lp[1]]))
similarity = get_similarity_by_words(sentence, question)
similarity_enhanced = get_similarity_by_words(sentence, question_enhanced)
similarity_avg = (similarity+similarity_enhanced)/2*lp[0]
#print(sentence,question,question_enhanced)
#print("similarity", similarity)
#print("question_enhanced", similarity_enhanced)
#mp_similarities_untagged.append((similarity_enhanced,lp[1]))
#mp_similarities_tagged.append((similarity_enhanced,looped_paths[i_lp][1]))
if lp[1][0] in [h[0] for h in hypothesises]:
#print("lp[1][0]",lp[1][0])
mp_similarities_untagged_hypo.append((similarity_avg, lp[1]))
#mp_similarities_tagged_hypo.append((similarity_avg, looped_paths[i_lp][1]))
mp_similarities_untagged.append((similarity_avg, lp[1]))
#mp_similarities_tagged.append((similarity_avg, looped_paths[i_lp][1]))
#print("mp_similarities_untagged",len(mp_similarities_untagged))
#print("mp_similarities_untagged_hypo",len(mp_similarities_untagged_hypo))
#print("mp_similarities_untagged",mp_similarities_untagged)
#mp_similarities_tagged = sorted(mp_similarities_tagged, key=lambda x: x[0], reverse=True)
#mp_similarities_tagged = [mp for mp in mp_similarities_tagged if mpu[0] > threshold]
mp_similarities_untagged = sorted(mp_similarities_untagged, key=lambda x: x[0], reverse=True)
mp_similarities_untagged = [mpu for mpu in mp_similarities_untagged if mpu[0] > threshold]
#print("mp_similarities_untagged",len(mp_similarities_untagged))
#print("mp_similarities_untagged",mp_similarities_untagged)
[mp_similarities_untagged.append(suh) for suh in mp_similarities_untagged_hypo if not suh in mp_similarities_untagged]
#[mp_similarities_tagged.append(sth) for sth in mp_similarities_tagged_hypo if not sth in mp_similarities_tagged]
#print("mp_similarities_untagged",len(mp_similarities_untagged))
#print("mp_similarities_tagged",len(mp_similarities_tagged))
#WH_FILTER = ["WDT", "WP", "WP$", "WRB"]
#wh_position = [w.i for w in question if w.tag_ in WH_FILTER][0]
#question_list = [w.lower_ for w in question if not w.is_punct]
#question_list_filtered = [w.lower_ for w in question if not w.is_punct and w.tag_ not in WH_FILTER]
if cores <= 0: cores = 1
sentinel = None
out_mp_queue = mp.Queue()
in_mp_queue = mp.Queue()
for mpu in mp_similarities_untagged:
#print("BEFORE mpu",mpu)
in_mp_queue.put(mpu)
procs = [mp.Process(target = match_hypothesises_worker, args = (in_mp_queue, out_mp_queue)) for i in range(cores)]
golden_paths = []
for proc in procs:
proc.daemon = True
proc.start()
for proc in procs:
in_mp_queue.put(sentinel)
for proc in procs:
golden_paths.extend(out_mp_queue.get())
for proc in procs:
proc.join()
golden_paths = [gp for gp in golden_paths if gp]
#print("golden_paths",golden_paths)
#print("len(golden_paths)",len(golden_paths))
sorted_golden_paths = []
for gp in golden_paths:
tmp_gp = []
#if gp[0] in [h[0] for h in hypothesises]:
for e in gp:
if is_wd_entity(e):
tmp_gp.append(get_wd_label(e))
            else:
                tag_index = e.find("-")
                tmp_gp.append(get_wd_label(e[:tag_index] if tag_index != -1 else e))
nlp_gp = get_nlp(" ".join(tmp_gp))
sorted_golden_paths.append((get_similarity_by_words(question,nlp_gp), gp))
#
sorted_golden_paths = sorted(sorted_golden_paths, key=lambda x: x[0], reverse=True)
#print("sorted_golden_paths",sorted_golden_paths)
#print("len(sorted_golden_paths) BEFORE",len(sorted_golden_paths))
#print("w_positions[0]",w_positions[0])
#print("w_names[0][1]",w_names[0][1])
if sorted_golden_paths:
#print("len(sorted_golden_paths)",len(sorted_golden_paths))
best_spos = [sorted_golden_paths[0]]
#print("initial best_spos",best_spos)
#spo = spo_k*3-1
for sgp in sorted_golden_paths:
for dm in range(deep_match):
if dm==0: spo_k=3
else: spo_k = (dm+1)*3-1
#print("spo_k",spo_k)
#print("sgp[1][:",spo_k,"]",sgp[1][:spo_k])
sentence = get_nlp(" ".join([get_wd_label(w) for i_w, w in enumerate(sgp[1][:spo_k]) if i_w != w_positions[0]]))
question_no_w = get_nlp(" ".join([q.text for q in question if q.lower_!=w_names[0][1].lower()]))
#print("question_no_w -> sentence",question_no_w,"->",sentence)
similarity = get_similarity_by_words(sentence, question_no_w)
#print("similarity",similarity)
#print("best_spos[0]",best_spos[0])
if best_spos[0][0] < similarity:
#print("best_spos[0][0] < similarity",best_spos[0][0] < similarity)
best_spos.insert(0,[similarity,sgp[1]])
#print("best_spos",best_spos)
sorted_golden_paths = [sgp[1] for sgp in best_spos]
#print("after sorted_golden_paths",sorted_golden_paths)
else:
for hypo in hypothesises:
#print("hypo",hypo[0])
for lp in [lp[1] for lp in meaningful_paths]:
if len(lp)>1 and lp[0] == hypo[0]:
if lp not in sorted_golden_paths:
sorted_golden_paths.append(lp)
if len(lp)>1 and lp[-1] == hypo[0]:
lp = list(reversed(lp))
if lp not in sorted_golden_paths:
sorted_golden_paths.append(lp)
if len(sorted_golden_paths) >= 1: break
#print("len(sorted_golden_paths) AFTER",len(sorted_golden_paths))
if not sorted_golden_paths:
for hypo in hypothesises:
for p in filtered_paths:
if len(p)>1 and p[0] == hypo[0]:
#print(p)
if p not in sorted_golden_paths:
sorted_golden_paths.append(p)
if len(p)>1 and p[-1] == hypo[0]:
p = list(reversed(p))
if p not in sorted_golden_paths:
sorted_golden_paths.append(p)
if len(sorted_golden_paths) >= 1: break
#print("len(sorted_golden_paths) AFTER AFTER",len(sorted_golden_paths))
if not sorted_golden_paths:
for hypo in hypothesises:
for p in filtered_paths:
if len(p)>1 and hypo[0] in p:
if p not in sorted_golden_paths:
sorted_golden_paths.append(p)
if len(sorted_golden_paths) >= 1: break
#print("len(sorted_golden_paths) AFTER AFTER AFTER",len(sorted_golden_paths))
golden_paths_filtered = []
for gp in sorted_golden_paths:
tmp_path = []
for i_e, e in enumerate(gp):
if i_e < len(gp)-2 and not is_wd_entity(e):
if e == gp[i_e+2]:
golden_paths_filtered.append(gp[:gp.index(e)+2])
break
else:
tmp_path.append(e)
else:
tmp_path.append(e)
if tmp_path:
for i_e, e in enumerate(tmp_path):
if is_wd_entity(e):
if tmp_path.count(e) > 1:
pass
else:
if tmp_path not in golden_paths_filtered:
golden_paths_filtered.append(tmp_path)
#print("w_names",w_names)
if w_names[0][1]: question_no_w = get_nlp(" ".join([q.text for q in question if q.lower_!=w_names[0][1].lower()]))
else: question_no_w = get_nlp(" ".join([q.text for q in question]))
#print("question_no_w",question_no_w)
golden_paths_enhanced = []
for gpf in golden_paths_filtered:
sentence = get_nlp(" ".join([get_wd_label(w) for i_w, w in enumerate(gpf) if i_w != w_positions[0]]))
similarity = get_similarity_by_words(sentence, question_no_w)
golden_paths_enhanced.append((similarity,gpf))
#print("question_no_w -> sentence",question_no_w,"->",sentence,"->",similarity)
    for mlp in meaningful_paths:
        sentence = get_nlp(" ".join([get_wd_label(w) for i_w, w in enumerate(mlp[1]) if i_w != w_positions[0]]))
        reversed_mlp = list(reversed(mlp[1]))
        reversed_sentence = get_nlp(" ".join([get_wd_label(w) for i_w, w in enumerate(reversed_mlp) if i_w != w_positions[0]]))
        enhanced_paths_only = [gpe[1] for gpe in golden_paths_enhanced]
        if golden_paths_filtered and mlp[1][0] == golden_paths_filtered[0][0] and mlp[1] not in enhanced_paths_only:
            similarity = get_similarity_by_words(sentence, question_no_w)
            golden_paths_enhanced.append((similarity,mlp[1]))
            #print("question_no_w -> sentence",question_no_w,"->",sentence,"->",similarity)
        elif golden_paths_filtered and mlp[1][0] == golden_paths_filtered[0][-1] and reversed_mlp not in enhanced_paths_only:
            similarity = get_similarity_by_words(reversed_sentence, question_no_w)
            golden_paths_enhanced.append((similarity,reversed_mlp))
            #print("question_no_w -> reversed_sentence",question_no_w,"->",reversed_sentence,"->",similarity)
#golden_paths_enhanced
#print("golden_paths_enhanced",golden_paths_enhanced)
sorted_golden_paths_enhanced = sorted(golden_paths_enhanced, key=lambda x: x[0], reverse=True)
#print("sorted_golden_paths_enhanced",sorted_golden_paths_enhanced)
golden_paths_enhanced = [gpe[1] for gpe in golden_paths_enhanced]
#print("len(golden_paths_filtered)",len(golden_paths_filtered))
#print("golden_paths_filtered",golden_paths_filtered)
golden_unique_paths = golden_paths_enhanced.copy()
for i_sgp, sgp in enumerate(golden_paths_enhanced):
for sgp_2 in golden_paths_enhanced:
if (is_sublist(sgp, sgp_2) and sgp!=sgp_2):
golden_unique_paths[i_sgp] = []
break
golden_unique_paths = [gup for gup in golden_unique_paths if gup]
hypothesises_names = [h[0] for h in hypothesises]
#print("golden_unique_paths",golden_unique_paths)
#print("before hypothesises_names",hypothesises_names)
#print("golden_unique_paths[0][0]",golden_unique_paths[0][0])
#print("hypothesises_names",hypothesises_names)
#if is_valide_wd_id(hypothesises_names[0]):
if not first_is_winner:
if golden_unique_paths and hypothesises_names:
if golden_unique_paths[0] and hypothesises_names[0]:
if golden_unique_paths[0][0]:
#for hn in hypothesises_names:
if not time_sensitive:
if (not is_wd_entity(hypothesises_names[0])
and is_wd_entity(golden_unique_paths[0][0])
or hypothesises_names[0] != golden_unique_paths[0][0]):
if golden_unique_paths[0][0] in hypothesises_names:
hypothesises_names.insert(0,golden_unique_paths[0][0])
elif (time_sensitive and is_timestamp(hypothesises_names[0]) and is_timestamp(golden_unique_paths[0][0])):
if golden_unique_paths[0][0] in hypothesises_names:
hypothesises_names.insert(0,golden_unique_paths[0][0])
elif time_sensitive:
is_found = False
for gup in golden_unique_paths:
if is_timestamp(gup[0]):
hypothesises_names.insert(0,gup[0])
is_found=True
break
elif is_timestamp(gup[-1]):
hypothesises_names.insert(0,gup[-1])
is_found=True
break
if not is_found:
best_hypo_name = hypothesises_names[0]
best_hypo_count = 0
for hn in hypothesises_names:
current_hypo_count = 0
for gpu in golden_unique_paths:
current_hypo_count += gpu.count(hn)
if current_hypo_count > best_hypo_count:
best_hypo_name = hn
best_hypo_count = current_hypo_count
hypothesises_names.insert(0,best_hypo_name)
else:
best_hypo_name = hypothesises_names[0]
best_hypo_count = 0
for hn in hypothesises_names:
current_hypo_count = 0
for gpu in golden_unique_paths:
current_hypo_count += gpu.count(hn)
if current_hypo_count > best_hypo_count:
best_hypo_name = hn
best_hypo_count = current_hypo_count
hypothesises_names.insert(0,best_hypo_name)
#print("hypothesises_names",hypothesises_names)
#print([gup for gup in golden_unique_paths if gup[0]])
golden_unique_paths_0 = [gup for gup in golden_unique_paths if gup[0] == hypothesises_names[0]]
if not golden_unique_paths_0:
golden_unique_paths_1 = []
#print("not golden_unique_paths")
for gup in golden_unique_paths:
#print("gup",gup)
#print("gup[-1]",gup[-1])
if hypothesises_names[0] == gup[-1]:
reversed_gup = list(reversed(gup))
#print("i am in ", reversed_gup)
golden_unique_paths_1.append(reversed_gup)
golden_unique_paths = golden_unique_paths_1
else:
golden_unique_paths = golden_unique_paths_0
#print("golden_unique_paths",golden_unique_paths)
#golden_unique_paths = [list(reversed(gup)) for gup in golden_unique_paths if gup[-1] == hypothesises_names[0]]
#print("after hypothesises_names",hypothesises_names)
hypothesises_names_filtered = []
[hypothesises_names_filtered.append(h) for h in hypothesises_names if h not in hypothesises_names_filtered]
#elif hypothesises_names[0] != golden_unique_paths[0][0]
golden_unique_paths = [hypothesises_names_filtered]+golden_unique_paths
return golden_unique_paths
#if verbose: print("-> Matching hypothesises...")
#start_time = time.time()
#golden_paths = match_hypothesises(graph, q_nlp, q_themes, q_predicates, hypothesises, paths_nodes_filtered, threshold=0.8, max_reward=2.0)
#end_time = time.time()
#print("Golden paths ->\tRunning time is {}s".format(round(end_time-start_time,2)))
#print(golden_paths)
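# Overview: match_hypothesises() expands each hypothesis into the unique paths
# that mention it, scores those paths against the themes, predicates and
# wh-word type (meaningful_paths), re-ranks them by sentence similarity with
# the wh-enhanced question, verifies the best candidates against the knowledge
# base in parallel (match_hypothesises_worker), and returns a list whose first
# element is the ordered hypothesis names followed by the supporting golden paths.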
# In[76]:
def get_context_graph(answer, graph, themes,question, previous_graph=False, in_context=False, top_k=1):
answer_graph = graph.copy()
if answer:
meaningful_ids = [answer[0][0]]
[[meaningful_ids.append(e) for e in p if e not in meaningful_ids] for p in answer[1:top_k+1]]
else: meaningful_ids=[]
#print("-> IN GET CONTEXT <-")
#print("meaningful_ids",meaningful_ids)
#print("answer",answer)
#print("themes",themes)
#print("question",question)
if answer:
if len(answer)>1:
spo_k = count_hops(answer[1],question)
if spo_k>1: last_element = spo_k*3-1
else: last_element = spo_k*3
meaningful_ids_answer = [answer[0][0]]
else: meaningful_ids_answer=[]
#print("before before meaningful_ids_answer", meaningful_ids_answer)
if answer:
if len(answer)>1:
[meaningful_ids_answer.append(e) for e in answer[1][:last_element] if e not in meaningful_ids_answer]
#print("before meaningful_ids_answer",meaningful_ids_answer)
for t in themes[0]:
if not any(item in t[1] for item in meaningful_ids_answer) and any(item in t[1] for item in meaningful_ids):
#print("not any(item in t[1] for item in meaningful_ids_answer) and has any item of t[1] in meaningful_ids")
[meaningful_ids_answer.append(e) for e in t[1]]
#print("meaningful_ids_answer",meaningful_ids_answer)
#print("meaningful_ids",meaningful_ids)
meaningful_ids_answer_names = [get_wd_label(mi) for mi in meaningful_ids_answer]
if in_context:
#print("nx.is_connected(answer_graph)",nx.is_connected(answer_graph))
#print("connected_components(answer_graph)",nx.connected_components(answer_graph))
if nx.is_connected(answer_graph):
context_centers = [(get_wd_label(c),[c]) for c in nx.center(answer_graph) if is_wd_entity(c)]
else:
context_centers = []
#print(nx.connected_components(answer_graph))
# for c_graph in nx.strongly_connected_components(answer_graph):
# print("c_graph",c_graph)
# print("list(c_graph)",list(c_graph))
# context_centers += [(get_wd_label(c),[c]) for c in nx.center(c_graph) if is_wd_entity(c)]
#print("context_centers",context_centers)
for cc in context_centers:
if cc[1][0] not in meaningful_ids:
meaningful_ids+=cc[1]
#print("meaningful_ids",meaningful_ids)
meaningful_names = [get_wd_label(mi) for mi in meaningful_ids]
for n in graph.nodes():
n_tmp = n
if n.find("-") != -1 and not is_timestamp(n):
n_tmp = n[:n.find("-")]
n_n = list(graph.neighbors(n))
#print("n_n",n_n)
if (n_tmp not in meaningful_ids
and n in graph.nodes()
and not any(item in n_n for item in meaningful_ids_answer)
and get_wd_label(n_tmp) not in meaningful_names
):
#print("removing",n)
answer_graph.remove_node(n)
graph_filtered = filter_graph_by_ids(answer_graph, meaningful_ids)
graph_filtered_names = filter_graph_by_names(answer_graph, meaningful_names, entities=True, predicates=False)
graph_filtered = nx.compose(graph_filtered,graph_filtered_names)
for p in [x for x,y in graph_filtered.nodes(data=True) if y["type"]=="predicate"]:
p_n = list(graph_filtered.neighbors(p))
if len(p_n) < 2:
graph_filtered.remove_node(p)
continue
graph_filtered.nodes[p]['qa'] = True
if p in meaningful_ids_answer:
graph_filtered.nodes[p]['qa'] = 1
else:
graph_filtered.nodes[p]['qa'] = 2
#print("answer[0][0]",answer[0][0])
for p in [x for x,y in graph_filtered.nodes(data=True) if y["type"]=="entity"]:
p_n = list(graph_filtered.neighbors(p))
if answer:
if answer[0]:
if len(p_n) == 0 and p != answer[0][0]:
graph_filtered.remove_node(p)
continue
graph_filtered.nodes[p]['qa'] = True
if p in meaningful_ids_answer:
graph_filtered.nodes[p]['qa'] = 1
else:
graph_filtered.nodes[p]['qa'] = 2
if previous_graph: graph_filtered = nx.compose(graph_filtered,previous_graph)
return graph_filtered
#context_graph_test = get_context_graph(answer_2,context_graph_2,question=question,in_context=True)
#plot_graph(context_graph_test, "file_name_graph", "Graph_title")
# In[77]:
def convert_to_literal(to_literal):
if is_timestamp(to_literal):
datetimeObj = datetime.strptime(to_literal, '%Y-%m-%dT%H:%M:%SZ')
if str(datetimeObj.time())!="00:00:00" and str(datetimeObj.date())[5:]=="01-01":
to_literal = str(datetimeObj.year)
elif str(datetimeObj.time())=="00:00:00":
to_literal = str(datetimeObj.strftime("%d %B %Y"))
else:
to_literal = str(datetimeObj.strftime("%d %B %Y")+" at "+datetimeObj.strftime("%H:%M"))
return to_literal
else:
return to_literal
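# Illustrative usage sketch (assumes is_timestamp() recognises the Wikidata "%Y-%m-%dT%H:%M:%SZ" format as above):
#convert_to_literal("2005-07-15T00:00:00Z")  # midnight timestamps collapse to a readable date, e.g. "15 July 2005"
#convert_to_literal("Barack Obama")          # non-timestamp values are returned unchanged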
# In[170]:
def is_binary_question(nlp_question):
BINARY_VERBS = ['be', 'have', 'do']
#print(nlp_question[0],nlp_question[0].lemma_,nlp_question[0].pos_,nlp_question[0].tag_)
if len(nlp_question)<=1:
return False
elif nlp_question[0].lemma_ in BINARY_VERBS and "or" in [w.lemma_ for w in nlp_question]:
return False
elif nlp_question[0].lemma_ in BINARY_VERBS:
return True
elif nlp_question[0].lower_ == "not" and nlp_question[1].lemma_ in BINARY_VERBS:
return True
else: return False
#nlp_question_test = get_nlp("Is this and that?")
#is_binary_question(nlp_question_test)
# In[ ]:
# TODO
# is this or this?
# is this and that?
def is_choice_question(nlp_question):
return False
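# A minimal sketch of what the TODO above could become (assumption: a choice question starts with a
# binary verb and offers alternatives joined by "or", which is_binary_question() explicitly excludes):
#def is_choice_question_sketch(nlp_question):
#    BINARY_VERBS = ['be', 'have', 'do']
#    if len(nlp_question) <= 1:
#        return False
#    return nlp_question[0].lemma_ in BINARY_VERBS and "or" in [w.lemma_ for w in nlp_question]
#
#is_choice_question_sketch(get_nlp("Is g.o.r.a. fantasy or science fiction?"))  # -> True under this heuristic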
# In[166]:
## questions = ("what was the cause of death of yves klein",
# "Who is the wife of Barack Obama?",
# "Who is the president of the United States?",
# "When was produced the first Matrix movie?",
# "Who made the soundtrack of the The Last Unicorn movie?",
# "Who is the author of Le Petit Prince?",
# "Which actor voiced the Unicorn in The Last Unicorn?",
# "how is called the rabbit in Alice in Wonderland?"
# )
#def print_running_time(start_time, end_time=time.time()):
# print("->\tRunning time is {}s".format(round(end_time-start_time,2)))
def answer_question(question, verbose=False, aggressive=False, looped=False,
deep_k=3, deep_k_step=1, deep_k_max=20,
graph_size_min=100, graph_size_target=350, graph_size_max=900,
paths_filter_max=20000, paths_max=300000,
timer=False, g_paths=True, show_graph=False, cores=mp.cpu_count(),
banning_str=False, reload_cache=False, answer_sentence=False,
previous_answer=False, previous_graph=False, graph_size_target_context=100,
deep_match=4, k_context=1, in_context=False, k_deep_followup=6,
k_deep_context_graph=1, context_themes=False, previous_answers=False,
max_deepness=5, g_autocorrect = True
):
PLURAL_PRONS = ["they"]
if previous_graph:
context_graph=previous_graph.copy()
else:
context_graph=False
(wd_local_statements_dict, wd_labels_dict,
wd_local_word_ids_dict, wd_online_word_ids_dict,
wd_local_predicate_ids_dict, wd_online_predicate_ids_dict,
word_similarities_dict) = load_cache_data(use_cache=reload_cache)
if verbose: start_time = time.time()
if timer: timer_time = time.time()
if verbose and not aggressive: print("User input:",question)
if verbose and aggressive: print("Looped in aggressive mode with:",question)
if verbose and g_autocorrect: print("--> Auto correcting question in progress...")
q_nlp = get_nlp(question, autocorrect=g_autocorrect, banning_str=banning_str)
q_nlp_src = q_nlp[:].doc
if verbose: print("-> Auto corrected q_nlp:",q_nlp)
last_golden_answer = []
last_golden_answer_predicates = []
last_golden_answer_entities = []
q_nlp_enhanced = False
if context_graph:
if verbose: print("> Processing in conversational context..")
in_context = True
#deep_k = deep_k+1
BINARY_ANSWERS = ["yes","no"]
if previous_answer:
if previous_answer[0][0] in BINARY_ANSWERS:
previous_answer = False
if previous_answer:
last_golden_answer = [previous_answer[0][0]]
last_golden_answer_name = get_wd_label(previous_answer[0][0])
if len(previous_answer) > 1:
last_golden_answer_predicates = [e for e in previous_answer[1] if is_wd_predicate(e)]
last_golden_answer_entities = [e for e in previous_answer[1] if is_wd_entity(e)]
last_golden_answer_entities_enhanced = []
for lg in last_golden_answer_entities:
last_golden_answer_entities_enhanced.append(lg)
lg_name = get_wd_label(lg)
lg_ids = get_wd_ids(lg_name, is_predicate=False, top_k=3, limit=6, online=True)
lg_ids = [lgi for lgi in lg_ids if get_wd_label(lgi) == lg_name]
for lgi in lg_ids:
if lgi not in last_golden_answer_entities_enhanced:
last_golden_answer_entities_enhanced.append(lgi)
#print("last_golden_answer_entities_enhanced",last_golden_answer_entities_enhanced)
last_golden_answer_entities = last_golden_answer_entities_enhanced
else:
last_golden_answer_predicates = []
last_golden_answer_entities = []
last_golden_answer_name = ""
else:
last_golden_answer_predicates = []
last_golden_answer_entities = []
last_golden_answer_name = ""
if nx.is_connected(context_graph):
context_centers = [(get_wd_label(c),[c]) for c in nx.center(context_graph)+last_golden_answer if is_wd_entity(c)]
context_centers_predicates = [(get_wd_label(c),[c]) for c in nx.center(context_graph)+last_golden_answer if is_wd_predicate(c)]
else:
context_centers = []
context_centers_predicates = []
#print("last_golden_answer",last_golden_answer)
#print("last_golden_answer_predicates",last_golden_answer_predicates)
#print("last_golden_answer_entities",last_golden_answer_entities)
#print("graph_context",context_graph)
#print("before best - context_centers",context_centers)
#print("before best - context_centers_predicates",context_centers_predicates)
center_best = sorted([(x, len(list(context_graph.neighbors(x)))) for x,y in context_graph.nodes(data=True) if y["type"]=="entity"], key=lambda x: x[1], reverse=True)
center_best = (get_wd_label(center_best[0][0]),[center_best[0][0]])
center_best_predicate = sorted([(x, len(list(context_graph.neighbors(x)))) for x,y in context_graph.nodes(data=True) if y["type"]=="predicate"], key=lambda x: x[1], reverse=True)
if center_best_predicate:
if center_best_predicate[0][0].find("-") != -1:
predicate_id = center_best_predicate[0][0][:center_best_predicate[0][0].find("-")]
center_best_predicate = (get_nlp(get_wd_label(predicate_id)),[predicate_id])
else: center_best_predicate = (get_nlp(get_wd_label(center_best_predicate[0][0])),[center_best_predicate[0][0]])
else:
center_best_predicate = False
#print("center_best_predicate",center_best_predicate)
if center_best[0][0] not in context_centers:
context_centers.append(center_best)
context_centers_filtered = []
[context_centers_filtered.append(cc) for cc in context_centers if cc not in context_centers_filtered]
context_centers = context_centers_filtered
if center_best_predicate and center_best_predicate[0][0] not in context_centers_predicates:
context_centers_predicates.append(center_best_predicate)
#print("after best - context_centers",context_centers)
#print("after best - context_centers_predicates",context_centers_predicates)
new_sentence = []
if verbose: print("-> Replacing pronouns from context..")
if aggressive and verbose: print("-> Replacing verbs in context..")
tmp_element_tracker = False
BANNED_POS_VERB = ["VERB","AUX"]
for i_w, w in enumerate(q_nlp):
if aggressive and i_w == 1 and w.pos_ not in BANNED_POS_VERB and q_nlp[i_w-1].pos_ not in BANNED_POS_VERB and last_golden_answer_predicates:
#print("q_nlp[i_w-1].pos_",q_nlp[i_w-1].pos_)
if tmp_element_tracker: tmp_element_tracker += 1
else: tmp_element_tracker = 0
#print("i_w == 1 tmp_element_tracker",tmp_element_tracker)
new_sentence.append(get_wd_label(last_golden_answer_predicates[0]))
#print("w",w.lower_,w.lemma_)
if w.lemma_ == "-PRON-" and w.lower_ in PLURAL_PRONS:
#print("plural")
for i_cc, cc in enumerate(context_centers):
if i_cc > 0 and len(context_centers)>1:
new_sentence.append("and")
new_sentence.append(cc[0])
elif w.lemma_ == "-PRON-":
new_sentence.append(context_centers[0][0])
elif aggressive and w.lemma_ == "be":
#print("w.lemma_",w)
for i_p, p in enumerate(last_golden_answer_predicates[:k_deep_followup]):
if type(tmp_element_tracker)==type(int(0)) and i_p <= tmp_element_tracker: continue
elif tmp_element_tracker: tmp_element_tracker += 1
else: tmp_element_tracker = 0
new_sentence.append(get_wd_label(p))
break
#if w.pos_ != "VERB" and q_nlp[i_w+1].pos_ != "VERB":
# for i_p, p in enumerate(last_golden_answer_predicates[:k_deep_followup]):
# if tmp_element_tracker and i_p < tmp_element_tracker: continue
# elif tmp_element_tracker: tmp_element_tracker += 1
# else: tmp_element_tracker = 0
# previous_predicate_position = i_w+1
# new_sentence.append(p)
# new_sentence.append(w.text)
#
#
#if previous_predicate_position
#if i_p == 0 and second_position_taken:continue
#new_sentence.append(get_wd_label(p))
else:
new_sentence.append(w.text)
question_enhanced = " ".join(new_sentence)
q_nlp_enhanced = get_nlp(question_enhanced)
if verbose: print("-> New q_nlp:",q_nlp_enhanced)
time_sensitive = False
if 'when' in [t.lower_ for t in q_nlp if t.tag_=="WRB"]: time_sensitive = True
binary_question = is_binary_question(q_nlp)
q_themes = get_themes(q_nlp, question, top_k=2, online=True)
if verbose: print("-> q_themes:",q_themes)
q_themes_enhanced = get_enhanced_themes(q_themes, top_k=1, title_limit=4, aggressive=True)
if verbose: print("-> q_themes_enhanced:",q_themes_enhanced)
if verbose: print("--> Calculating predicates... (could be long.. depends on uncached unpure predicates)")
q_predicates_db = get_predicates(q_nlp, q_themes, top_k=0)
q_predicates_online = get_predicates_online(q_nlp, top_k=2, aggressive=aggressive)
q_predicates = []
q_predicates_db_ids = [p[1] for p in q_predicates_db]
q_predicates_db_names = [p[0] for p in q_predicates_db]
q_predicates_online_ids = [p[1] for p in q_predicates_online]
q_predicates_online_names = [p[0] for p in q_predicates_online]
for i_n,n in enumerate(q_predicates_db_names):
pn_online_text = [n.text for n in q_predicates_online_names]
tmp_ids = q_predicates_db_ids[i_n]
if n.text in pn_online_text:
for p_o in q_predicates_online_ids[pn_online_text.index(n.text)]:
if p_o not in tmp_ids:
tmp_ids.append(p_o)
q_predicates.append((n,tmp_ids))
for i_n_o,n_o in enumerate(q_predicates_online_names):
n_db_text = [n.text for n in q_predicates_db_names]
if n_o.text not in n_db_text:
q_predicates.append((n_o, q_predicates_online_ids[i_n_o]))
if verbose: print("-> q_predicates:",q_predicates)
if timer:
print("-> q_predicates \tRunning time is {}s".format(round(time.time()-timer_time,2)))
timer_time = time.time()
if context_graph:
if last_golden_answer_predicates:
for i_lgap, lgap in enumerate(last_golden_answer_predicates):
if i_lgap > k_deep_followup:# and not aggressive:
break
#elif aggressive and i_lgap >= k_deep_followup*2:
# break
if lgap.find("-") != -1:
lgap = lgap[:lgap.find("-")]
q_predicates.append((get_nlp(get_wd_label(lgap)),[lgap]))
#print("updated q_predicates:",q_predicates)
if context_centers_predicates:
q_predicates=context_centers_predicates+q_predicates
q_predicates_filtered = []
[q_predicates_filtered.append(p) for p in q_predicates if p[1] not in [pf[1] for pf in q_predicates_filtered]]
q_predicates = q_predicates_filtered
if verbose: print("--> Predicates enhanced by previous context:",q_predicates)
#print("context_centers",context_centers)
q_themes_new = []
#print([cc[0] for cc in context_centers])
for t in q_themes[0]:
replaced = False
for cc in context_centers:
#print("cc[0]",cc[0],type(cc[0]))
#print("t[0]",t[0],type(t[0]))
if cc[0].lower() == t[0].lower_:
q_themes_new.append((t[0],cc[1]))
replaced = True
if not replaced and (t[0].text,t[1]) not in [(e[0].text,e[1]) for e in q_themes_new]:
q_themes_new.append((t[0],t[1]))
q_themes_1_new=[]
for t in q_themes[1]:
is_in = False
for cc in context_centers:
for w in cc[0].split(" "):
if w in t.text.split(" "):
is_in = True
break
if is_in:
break
if not is_in and t[0].text not in [e.text for e in q_themes_1_new]:
q_themes_1_new.append(t[0])
#q_themes_new_filtered = []
#[q_themes_new_filtered.append(t) for t in q_themes_new if t and t not in q_themes_new_filtered]
#
#q_themes_new_1_filtered = []
#[q_themes_new_1_filtered.append(t) for t in q_themes_1_new if t and t not in q_themes_new_1_filtered]
q_themes_src = q_themes[:]
if not q_themes_new:
q_themes_new = [(get_nlp(last_golden_answer_name), last_golden_answer)]
q_themes = (q_themes_new, q_themes_1_new)
if verbose: print("----> q_themes in context:",q_themes)
q_theme_names = [q[0].text for q in q_themes[0]]
q_theme_ids = sum([qt[1] for qt in q_themes[0]],[])
#print("q_theme_ids",q_theme_ids)
q_theme_enhanced_names = [q[0] for q in q_themes_enhanced]
meaningful_names = [mn for mn in q_theme_names+q_theme_enhanced_names if mn != ""]
if verbose: print("--> Potential meaningful keywords for the sentence:",meaningful_names)
if context_graph:
previous_entities_ids = [x for x,y in context_graph.nodes(data=True) if y["type"]=="entity"]
previous_entities_ids_enhanced = sum([get_wd_ids(get_wd_label(e), is_predicate=False, top_k=3, limit=6, online=True) for e in previous_entities_ids],[])
previous_entities_names_nlp = [get_nlp(get_wd_label(e))[:] for e in previous_entities_ids]
previous_entities_names = [get_wd_label(e) for e in previous_entities_ids]
meaningful_names += previous_entities_names
#('The Last Unicorn', ['Q967268']), ('Mia Farrow', ['Q202725']), ('The Last Unicorn', ['Q176198'])]
context_centers_names = [cc[0] for cc in context_centers]
#print("context_centers_names",context_centers_names)
[meaningful_names.append(w) for w in context_centers_names if w not in meaningful_names and w!=""]
if verbose: print("---> Meaningful keywords enhanced by previous context:",meaningful_names)
meaningful_names_no_previous_answer = []
#print("last_golden_answer",last_golden_answer)
for mn in meaningful_names:
#if mn.lower() != last_golden_answer_name.lower():
if len(get_nlp(mn)[:]) > 0:
meaningful_names_no_previous_answer.append(get_nlp(mn)[:])
#meaningful_names_no_previous_answer = meaningful_names.remove(last_golden_answer)
print("meaningful_names_no_previous_answer",meaningful_names_no_previous_answer)
#print("previous_entities_ids_enhanced",previous_entities_ids_enhanced)
#meaningful_names_no_previous_answer += previous_entities_names
meaningful_names_no_previous_answer_ids = []
for mnnpai in get_themes_ids_from_chunks(meaningful_names_no_previous_answer):
tmp_row = []
for mn in mnnpai:
if mn in last_golden_answer_entities+q_theme_ids+previous_entities_ids+previous_entities_ids_enhanced:
tmp_row.append(mn)
meaningful_names_no_previous_answer_ids.append(tmp_row)
#last_golden_answer = [previous_answer[0][0]]
#last_golden_answer_name = get_wd_label(previous_answer[0][0])
meaningful_names_no_previous_answer_as_theme = merge_lists(meaningful_names_no_previous_answer,meaningful_names_no_previous_answer_ids)
meaningful_names_no_previous_answer_as_theme_filtered = []
for mnno in meaningful_names_no_previous_answer_as_theme:
tmp_row = []
for mnn in mnno[1]:
if mnno[0].text == get_wd_label(mnn):
tmp_row.append(mnn)
meaningful_names_no_previous_answer_as_theme_filtered.append((mnno[0],tmp_row))
meaningful_names_no_previous_answer_as_theme = meaningful_names_no_previous_answer_as_theme_filtered
meaningful_names_no_previous_answer_as_theme = ([mn for mn in meaningful_names_no_previous_answer_as_theme if mn[1]],[])
if not meaningful_names_no_previous_answer_as_theme[0]:
meaningful_names_no_previous_answer_as_theme = q_themes
if verbose: print("----> Meaningful keywords casted as theme",meaningful_names_no_previous_answer_as_theme)
q_focused_parts = meaningful_names_no_previous_answer_as_theme[0]
#print("q_focused_parts in context",q_focused_parts)
if q_nlp_enhanced:
q_nlp = q_nlp_enhanced
if not context_graph:
q_focused_parts = get_focused_parts(q_nlp, q_themes, top_k=2, in_context=in_context)
#print("1 q_focused_parts",q_focused_parts)
#q_focused_parts: [(actor, ['Q10798782', 'P161', 'Q33999', 'Q421946'])]
q_focused_parts_filtered = []
dummy_doc = get_nlp("dummy doc")
dummy_span = dummy_doc[:]
dummy_token = dummy_span[0]
if q_focused_parts:
for t in q_focused_parts:
#print("len(t)",len(t))
if len(t)>1:
t0_tmp = t[0]
#print("type(t0_tmp)",type(t0_tmp))
if type(dummy_token) == type(t0_tmp):
t0_tmp = get_nlp(t[0].text)[:]
#print("new t0_tmp,type(t0_tmp)",t0_tmp,type(t0_tmp))
q_focused_parts_filtered.append((t0_tmp,t[1]))
#print("len(q_focused_parts_filtered)",len(q_focused_parts_filtered))
q_focused_parts = q_focused_parts_filtered
#print("before q_themes_src q_focused_parts",q_focused_parts)
#if context_graph: print("q_themes_src",q_themes_src)
if context_graph: [q_focused_parts.append(t) for t in q_themes_src[0] if t[0].text not in [fp[0].text for fp in q_focused_parts]]
if verbose: print("q_focused_parts:",q_focused_parts)
if verbose: print("-> Building the graph with k_deep",str(deep_k),"... (could be long)")
# Auto-scaling the graph size with deepness
if deep_k > deep_k_max and looped:
deep_k_max+=int(deep_k_max/2)
if in_context: q_themes = meaningful_names_no_previous_answer_as_theme
previous_graph_size = 0
previous_graph_len = 0
if deep_k<2:
if in_context: deep_k = 1
else: deep_k = 2
#graph, predicates_dict = build_graph(q_nlp, q_themes, q_themes_enhanced, q_predicates, deep_k=deep_k, time_sensitive=time_sensitive, cores=cores, context_graph=context_graph,previous_answer=previous_answer,aggressive=aggressive,k_deep_followup=k_deep_followup,max_deepness=max_deepness)
#if verbose: print("--> ",len(graph), "nodes and", graph.size(), "edges")
#if verbose: print("--> Removing meaningless subgraphs")
#graph = filter_graph_by_names(graph, meaningful_names, entities=True, predicates=False)
#if verbose: print("--> New graph of:",len(graph), "nodes and", graph.size(), "edges")
#if timer:
# print("->New graph\tRunning time is {}s".format(round(time.time()-timer_time,2)))
# timer_time = time.time()
#else:
if deep_k >= deep_k_max:
graph, predicates_dict = build_graph(q_nlp, q_themes, q_themes_enhanced, q_predicates, deep_k=deep_k, time_sensitive=time_sensitive, cores=cores, context_graph=context_graph,previous_answer=previous_answer,aggressive=aggressive,k_deep_followup=k_deep_followup,max_deepness=max_deepness)
if verbose: print("---> deep_k > deep_k_max, running graph as last trial with deep_k:",deep_k)
if timer:
print("->Rebuild new graph \tRunning time is {}s".format(round(time.time()-timer_time,2)))
timer_time = time.time()
if verbose: print("--> Removing meaningless subgraphs")
graph = filter_graph_by_names(graph, meaningful_names, entities=True, predicates=False)
if verbose: print("--> New graph of:",len(graph), "nodes and", graph.size(), "edges")
else:
for k in range(deep_k, deep_k_max, deep_k_step):
graph, predicates_dict = build_graph(q_nlp, q_themes, q_themes_enhanced, q_predicates, deep_k=deep_k, time_sensitive=time_sensitive, cores=cores, context_graph=context_graph,previous_answer=previous_answer,aggressive=aggressive,k_deep_followup=k_deep_followup,max_deepness=max_deepness)
if timer:
print("->New graph \tRunning time is {}s".format(round(time.time()-timer_time,2)))
timer_time = time.time()
if verbose: print("--> ",len(graph), "nodes and", graph.size(), "edges")
if verbose: print("--> Removing meaningless subgraphs")
graph = filter_graph_by_names(graph, meaningful_names, entities=True, predicates=False)
if verbose: print("--> New graph of:",len(graph), "nodes and", graph.size(), "edges")
if previous_graph_size == graph.size() and previous_graph_len == len(graph):
if verbose: print("---> Loop detected, returning the graph in the current state")
break
else:
previous_graph_size = graph.size()
previous_graph_len = len(graph)
if context_graph and (graph.size() > graph_size_target_context or len(graph) > graph_size_target_context) and deep_k >= 2:
deep_k -= deep_k_step
if verbose: print("---> Rebuilding the graph with k_deep",str(deep_k), "... Previously:",len(graph), "nodes or", graph.size(), "edges was above the limit of",graph_size_target_context)
graph, predicates_dict = build_graph(q_nlp, q_themes, q_themes_enhanced, q_predicates, deep_k=deep_k, time_sensitive=time_sensitive, cores=cores, context_graph=context_graph,previous_answer=previous_answer,aggressive=aggressive,k_deep_followup=k_deep_followup,max_deepness=max_deepness)
break
elif (graph.size() > graph_size_target or len(graph) > graph_size_target) and deep_k >= 2:
deep_k -= deep_k_step
if verbose: print("---> Rebuilding the graph with k_deep",str(deep_k), "... Previously:",len(graph), "nodes or", graph.size(), "edges was above the limit of",graph_size_target)
graph, predicates_dict = build_graph(q_nlp, q_themes, q_themes_enhanced, q_predicates, deep_k=deep_k, time_sensitive=time_sensitive, cores=cores, context_graph=context_graph,previous_answer=previous_answer,aggressive=aggressive,k_deep_followup=k_deep_followup,max_deepness=max_deepness)
break
elif graph.size() <= graph_size_min or len(graph) <= graph_size_min:
if context_graph:
break
elif graph.size() < graph_size_min/3 or len(graph) < graph_size_min/3:
deep_k += deep_k_step*3
elif graph.size() < graph_size_min/4*3 or len(graph) < graph_size_min/4*3:
deep_k += deep_k_step*2
else:
deep_k += deep_k_step
if verbose: print("---> Rebuilding the graph with k_deep",str(deep_k), "... Previously:",len(graph), "nodes or", graph.size(), "edges was below the limit of",graph_size_min)
else: break
if graph.size()>graph_size_max or len(graph)>graph_size_max:
if verbose: print("---> Too many nodes, statistically it's not worth the run. Cancelling question, it probably require reasoning.\n")
return False,False
if len(graph)==0:
if verbose: print("---> No nodes, cancelling this run\n")
if looped:
if previous_graph: return False,previous_graph
else: return False,False
else: return answer_question(question, verbose=verbose, aggressive=True, looped=True,
deep_k=deep_k, deep_k_step=deep_k_step, deep_k_max=deep_k_max,
graph_size_min=graph_size_min, graph_size_target=graph_size_target, graph_size_max=graph_size_max,
paths_filter_max=paths_filter_max, paths_max=paths_max,
timer=timer, g_paths=g_paths, show_graph=show_graph, cores=cores,
banning_str=banning_str, reload_cache=reload_cache, answer_sentence=answer_sentence,
previous_answer=previous_answer, previous_graph=previous_graph, graph_size_target_context=graph_size_target_context,
deep_match=deep_match, k_context=k_context, in_context=in_context, k_deep_followup=k_deep_followup,
k_deep_context_graph=k_deep_context_graph, context_themes=context_themes, previous_answers=previous_answers,
max_deepness=max_deepness, g_autocorrect=g_autocorrect
)
#if context_graph:
# for g in [g for g in (graph.subgraph(c) for c in nx.connected_components(graph))]:
# print("g",list(g.nodes()))
# print("context_centers",context_centers)
# context_centers_ids = []
# for cc in context_centers:
# context_centers_ids+=cc[1]
# if all(item in list(g.nodes()) for item in context_centers_ids):
# print("yes")
if show_graph:
if verbose: print("---> Ploting the full graph")
plot_graph(graph, "file_name_full_graph", "Full_Graph_title")
if binary_question:
#print("Binary question detected!")
q_nlp_str = q_nlp_src.text
q_nlp_list = q_nlp_str.split(" ")
q_nlp_list_len = len(q_nlp_list)
if len([y['name'] for x,y in graph.nodes(data=True)]) == 0:
answer = [["no"],[]]
if previous_answers:
return [answer, previous_graph]
else: return [answer, False]
for node_name in [y['name'] for x,y in graph.nodes(data=True)]:
#print("node_name",node_name,type(node_name))
node_name_len = len(node_name.split(" "))
#print("node_name_len",node_name_len)
if node_name_len <= 0:
continue
for i_w, w in enumerate(q_nlp_list):
if i_w+node_name_len-1<q_nlp_list_len:
biword = " ".join(q_nlp_list[i_w:i_w+node_name_len])
#print("biword",biword)
node_name_similarity = get_nlp(biword).similarity(get_nlp(node_name))
#print("node_name_similarity",node_name_similarity)
if node_name_similarity > 0.9:
#print("q_themes[0]",q_themes[0],type(q_themes[0][0]))
#print("q_themes[0][0]",q_themes[0][0])
#print("q_themes[0][0][1]",q_themes[0][0][1])
node_name_id = sum([ti[1] for ti in q_themes[0] if ti[0].text==node_name],[])
print("node_name_id",node_name_id)
if q_themes[0]:
context_graph = get_context_graph(node_name_id, graph, q_themes,question=question,previous_graph=previous_graph,in_context=in_context, top_k=k_deep_context_graph)
elif q_themes_enhanced[0]:
context_graph = get_context_graph(node_name_id, graph, q_themes_enhanced,question=question,previous_graph=previous_graph,in_context=in_context, top_k=k_deep_context_graph)
else:
context_graph = graph
answer = [["yes"],[]]
if show_graph:
if verbose: print("---> Ploting the context graph (PLOT 0)")
plot_graph(context_graph, "file_name_full_graph", str(question)+str(": ")+str(answer[0][0]))
return [answer, context_graph]
#q_themes[0] [(Isla Fisher, ['Q228638']), (Wedding Crashers, ['Q238866', 'Q11902317']), (David Dobkin, ['Q607615', 'Q5232995']), (English, ['Q11616958', 'Q1219933', 'Q12261586']), (Dad, ['Q16167495', 'Q1156922', 'Q12838408']), (film, ['Q11424']), (United States of America, ['Q19971019', 'Q30'])] <class 'tuple'>
#node_name_pos = q_nlp_str.find(node_name)
#if node_name_pos != -1:
# print("node_name_pos",node_name_pos)
# node_name_ids = get_wd_ids(node_name)
# context_graph = get_context_graph([], graph, q_themes,question=question,previous_graph=previous_graph,in_context=in_context, top_k=k_deep_context_graph)
# answer = [["yes"],[]]
# return [answer, context_graph]
answer = [["no"],[]]
if previous_answers:
return [answer, previous_graph]
else: return [answer, False]
#answer = cleared_golden_paths
#context_graph = get_context_graph(answer[:k_context+1], graph, q_themes,question=question,previous_graph=previous_graph,in_context=in_context, top_k=k_deep_context_graph)
#if show_graph:
# if verbose: print("---> Ploting the context graph (PLOT 1)")
# plot_graph(context_graph, "file_name_context_graph", "Context_Graph_title")
#return [answer, context_graph]#[answer, graph]#
if verbose: print("-> predicates_dict:",predicates_dict)
paths_keywords = find_paths_keywords(graph, q_nlp, q_themes, q_themes_enhanced, q_predicates, q_focused_parts)
if verbose: print("-> paths_keywords:",paths_keywords)
if timer: timer_time = time.time()
if verbose: print("-> Computing possible paths... (could be long)")
path_nodes = find_path_nodes_from_graph(q_nlp, graph, predicates_dict, paths_keywords, threshold=0.8,special_pred_theshold=0.7, thres_inter=0.1, top_performance=len(graph), min_paths=100, cores=cores)
if verbose: print("--> len(path_nodes):",len(path_nodes))
if timer:
print("->Computing possible paths \tRunning time is {}s".format(round(time.time()-timer_time,2)))
timer_time = time.time()
if len(path_nodes) < paths_filter_max:
if verbose: print("-> Filtering paths... (could be long)")
paths_nodes_filtered = paths_nodes_filter(path_nodes, graph)
if verbose: print("--> len(paths_nodes_filtered):",len(paths_nodes_filtered))
#print("paths_nodes_filtered",paths_nodes_filtered)
if timer:
print("->\tRunning time is {}s".format(round(time.time()-timer_time,2)))
timer_time = time.time()
elif len(path_nodes) < paths_max:
if verbose: print("---> Too many paths, statistically it's not worth the run. Cancelling question, it probably require reasoning.\n")
return False,False
else:
if verbose: print("--> Skipping paths filtering... (too much paths)")
paths_nodes_filtered = paths_nodes_filter(path_nodes, graph, with_sublists=False)
if verbose: print("-> Computing hypothesises...")
#print("IN MAIN - q_themes",q_themes)
hypothesises = get_hypothesises(q_nlp, predicates_dict, q_predicates, q_themes, paths_keywords, paths_nodes_filtered, threshold=0.5, max_reward=2.0, in_context=in_context)
if verbose: print("--> hypothesises:",hypothesises)
if timer:
print("->Computing hypothesises \tRunning time is {}s".format(round(time.time()-timer_time,2)))
timer_time = time.time()
if g_paths:
if hypothesises:
if verbose: print("-> Computing golden paths...")
golden_paths = match_hypothesises(graph, q_nlp, q_themes, q_themes_enhanced, q_focused_parts, q_predicates, hypothesises, paths_nodes_filtered, threshold=0.8, max_reward=2.0,winner_threshold_diff=4.0, time_sensitive=time_sensitive,deep_match=deep_match)
if verbose: print("--> len(golden_paths):",len(golden_paths)-1)
if timer:
print("->\tRunning time is {}s".format(round(time.time()-timer_time,2)))
timer_time = time.time()
else:
if not looped and not aggressive:
if verbose: print("-> Looping on aggressive mode...\n")
return answer_question(question, verbose=verbose, aggressive=True, looped=True,
deep_k=deep_k, deep_k_step=deep_k_step, deep_k_max=deep_k_max,
graph_size_min=graph_size_min, graph_size_target=graph_size_target, graph_size_max=graph_size_max,
paths_filter_max=paths_filter_max, paths_max=paths_max,
timer=timer, g_paths=g_paths, show_graph=show_graph, cores=cores,
banning_str=banning_str, reload_cache=reload_cache, answer_sentence=answer_sentence,
previous_answer=previous_answer, previous_graph=previous_graph, graph_size_target_context=graph_size_target_context,
deep_match=deep_match, k_context=k_context, in_context=in_context, k_deep_followup=k_deep_followup,
k_deep_context_graph=k_deep_context_graph, context_themes=context_themes, previous_answers=previous_answers,
max_deepness=max_deepness, g_autocorrect=g_autocorrect
)
else:
if verbose: print("--> End of loop")
golden_paths=[]
save_cache_data(save_cache=save_cache)
if g_paths:
if golden_paths == (False, False):
golden_paths = False
if golden_paths:
if golden_paths[0]:
cleared_golden_paths = [golden_paths[0].copy()]
for p in golden_paths[1:]:
tmp_labeling = []
for e in p:
#tmp_labeling.append(get_wd_label(e))
tmp_labeling.append(e)
if tmp_labeling not in cleared_golden_paths:
cleared_golden_paths.append(tmp_labeling)
else: cleared_golden_paths = []
if verbose: print("--> len(cleared_golden_paths):",len(cleared_golden_paths)-1)
if len(cleared_golden_paths) > 1:
if verbose: print("---> First path:",cleared_golden_paths[1])
if timer: timer_time = time.time()
#
if verbose: print("->\tTotal Running time is {}s\n".format(round(time.time()-start_time,2)))
if g_paths:
if golden_paths:
answer = cleared_golden_paths
context_graph = get_context_graph(answer[:k_context+1], graph, q_themes,question=question,previous_graph=previous_graph,in_context=in_context, top_k=k_deep_context_graph)
if show_graph:
if verbose: print("---> Ploting the context graph (PLOT 1)")
plot_graph(context_graph, "file_name_full_graph", str(question)+str(": ")+str(answer[0][0]))
return [answer, context_graph]#[answer, graph]#
elif hypothesises and previous_graph:
answer = [[a[0] for a in hypothesises]] + [[hypothesises[0][0]]]
if show_graph:
if verbose: print("---> Ploting the previous context graph (PLOT 2)")
plot_graph(previous_graph, "file_name_full_graph", str(question)+str(": ")+str(answer[0][0]))
return [answer, previous_graph]
elif previous_graph:
if show_graph:
if verbose: print("---> Ploting the previous context graph (PLOT 3)")
plot_graph(previous_graph, "file_name_full_graph", str(question)+str(": ")+str(False))
return [False, previous_graph]
else: return False,False
else:
if hypothesises:
answer = [[a[0] for a in hypothesises]] + [[hypothesises[0][0]]]
context_graph = get_context_graph(answer[:k_context+1], graph, q_themes,question=question,previous_graph=previous_graph,in_context=in_context, top_k=k_deep_context_graph)
if show_graph:
if verbose: print("---> Ploting the context graph (PLOT 4)")
plot_graph(context_graph, "file_name_full_graph", str(question)+str(": ")+str(answer[0][0]))
return [answer, context_graph]
elif previous_graph:
if show_graph:
if verbose: print("---> Ploting the previous context graph (PLOT 5)")
plot_graph(previous_graph, "file_name_full_graph", str(question)+str(": ")+str(False))
return [False, previous_graph]
else:
return False,False
#answer,context_graph = answer_question("what film is by the writer phil hay?", verbose=True, timer=True) #444.36s
#answer,context_graph = answer_question("When was produced the first Matrix movie?", verbose=True, timer=True) #70.67s
#answer,context_graph = answer_question("Which actor voiced the Unicorn in The Last Unicorn?", verbose=True, timer=True, g_paths=True, show_graph=True) #works 312.12s
#answer,context_graph = answer_question("Who voiced the Unicorn in The Last Unicorn?", verbose=True, timer=True, show_graph=True) #works 323.52s
#answer,context_graph = answer_question("How many actors voiced the Unicorn in The Last Unicorn?", verbose=True, timer=True) #592.22s
#answer,context_graph = answer_question("Which is the nation of Martha Mattox", verbose=True, timer=True) #97.89s
#answer,context_graph = answer_question("Who made the soundtrack of the The Last Unicorn movie?", verbose=True, timer=True)
#answer,context_graph = answer_question("Who is the author of Le Petit Prince?", verbose=True, timer=True)
#answer,context_graph = answer_question("When was produced the first Matrix movie?", verbose=True, timer=True)
#answer,context_graph = answer_question("Who is the president of the United States?", verbose=True, timer=True) #node Q76 not in graph 324.88s
#answer,context_graph = answer_question("Who is the wife of Barack Obama?", verbose=True, timer=True) #works 275.94s
#answer,context_graph = answer_question("what was the cause of death of yves klein", verbose=True, timer=True) #309.06s
#answer,context_graph = answer_question("what city was alex golfis born in", verbose=True, timer=True)
#answer,context_graph = answer_question("which stadium do the wests tigers play in", verbose=True, timer=True) #462.47s
#answer,context_graph = answer_question("lol", verbose=True, timer=True)
#answer,context_graph = answer_question("what's akbar tandjung's ethnicity", verbose=True, timer=True)
#answer,context_graph = answer_question("Which equestrian was is in dublin ?", verbose=True, timer=True)
#answer,context_graph = answer_question("how does engelbert zaschka identify ", verbose=True, timer=True)
#answer,context_graph = answer_question("Who influenced michael mcdowell?", verbose=True, timer=True)
#answer,context_graph = answer_question("what does 2674 pandarus orbit", verbose=True, timer=True)
#answer,context_graph = answer_question("what production company was involved in smokin' aces 2: assasins' ball", verbose=True, timer=True)
#answer,context_graph = answer_question("who's a kung fu star from hong kong", verbose=True, timer=True)
#answer,context_graph = answer_question("Which genre of album is harder.....faster?", verbose=True, timer=True)
#answer,context_graph = answer_question("Which equestrian was born in dublin?", verbose=True, timer=True)
#answer,context_graph = answer_question("Who is the author that wrote the book Moby Dick", verbose=True, timer=True, show_graph=True) #314.04s works
#answer,context_graph = answer_question("Name a person who died from bleeding.", verbose=True, timer=True) # 117.35s
#answer,context_graph = answer_question("What is the name of the person who created Saved by the Bell?", verbose=True, timer=True)
#answer,context_graph = answer_question("of what nationality is ken mcgoogan", verbose=True, timer=True) #works 51.39s
#
#answer,context_graph = answer_question("What is a tv action show?", verbose=True, timer=True, g_paths=False)
#answer,context_graph = answer_question("who published neo contra", verbose=True, timer=True, g_paths=False)
#answer,context_graph = answer_question("When was the publication date of the movie Grease?", verbose=True, timer=True)
#answer,context_graph = answer_question("When did the movie Grease come out?", verbose=True, timer=True, show_graph=True)
#answer,context_graph = answer_question("whats the name of the organization that was founded by frei otto", verbose=True, timer=True, g_paths=False)
#answer,context_graph = answer_question("where was johannes messenius born", verbose=True, timer=True, g_paths=False)
#answer,context_graph = answer_question("What is a type of gameplay available to gamers playing custom robo v2", verbose=True, timer=True, g_paths=False)
#answer,context_graph = answer_question("Which genre of album is Harder ... Faster?", verbose=True, timer=True, show_graph=True)
#answer,context_graph = answer_question("Who is the author that wrote the book Moby Dick", verbose=True, timer=True, show_graph=True)
#answer,context_graph = answer_question("how does engelbert zaschka identify", verbose=True, timer=True, show_graph=True)
#answer,context_graph = answer_question("where was shigeyasu suzuki's place of birth", verbose=True, timer=True, show_graph=True)
#answer,context_graph = answer_question("What is the name of the writer of The Secret Garden?", verbose=True, timer=True, show_graph=True)
#answer,context_graph = answer_question("Who was an influential figure for miško Šuvaković", verbose=True, timer=True, show_graph=True)
#
#answer,context_graph = answer_question("When did the movie Grease come out?", verbose=True, timer=True, show_graph=True)
#answer,context_graph = answer_question("of what nationality is ken mcgoogan", verbose=True, timer=True) #works 51.39s
#answer,context_graph = answer_question("Where did roger marquis die", verbose=True, timer=True, show_graph=True) # works 64.56s
#answer,context_graph = answer_question("How many people were in The Beatles?", verbose=True, timer=True, show_graph=True)
#answer,context_graph = answer_question("which type of people does roberto benigni belong to", verbose=True, timer=True, show_graph=True)
#answer,context_graph = answer_question("Which actor voice the Unicorn in The Last Unicorn?", verbose=True, timer=True, show_graph=True)
#answer_1,context_graph_1 = answer_question("By whom was Misery written?", verbose=True, timer=True, show_graph=True,deep_k=18)
#if answer_1:
# print("Answer:",convert_to_literal(get_wd_label(answer_1[0][0])), "("+str(answer_1[0][0])+")\n")
#answer_1,context_graph_1 = answer_question("Who is the wife of Barack Obama?", verbose=True, timer=True, show_graph=True)
#answer_1,context_graph_1 = answer_question("where did st. gerard majella annual novena die", verbose=True, timer=True, show_graph=True)
#answer_1,context_graph_1 = answer_question("who is the writer of chance and necessity", verbose=True, timer=True, show_graph=True)
#answer_1,context_graph_1 = answer_question("in which conflict did frederick benteen participate in", verbose=True, timer=True, show_graph=True)
#
#answer_1,context_graph_1 = answer_question("is g.o.r.a. fantasy or science fiction", verbose=True, timer=True, show_graph=True)
#
#if answer_1:
# print("Answer:",convert_to_literal(get_wd_label(answer_1[0][0])), "("+str(answer_1[0][0])+")\n")
### #print("Paths:",[[get_wd_label(e) for e in row] for row in answer[1:]])
#answer,context_graph = answer_question("When did the movie Grease come out?", verbose=True, timer=True, g_paths=False, show_graph=True)
#answer,context_graph = answer_question("When was the publication date of the movie Grease?", verbose=True, timer=True, g_paths=False)
#answer,context_graph = answer_question("Which actor voiced the Unicorn in The Last Unicorn?", verbose=True, timer=True, show_graph=True)
#answer,context_graph = answer_question("whats the name of the organization that was founded by frei otto", verbose=True, timer=True, g_paths=False, show_graph=True)
#answer,context_graph = answer_question("What was Joseph Ferdinand Daniel Place of Death", verbose=True, timer=True, g_paths=False, show_graph=True)
#answer_1,context_graph_1 = answer_question("Who is the wife of Barack Obama?", verbose=True, timer=True, show_graph=True)
#answer_2,context_graph_2 = answer_question("When did they marry?", previous_answer=answer_1, previous_graph=context_graph_1, verbose=True, timer=True, show_graph=True)
#answer_3,context_graph_3 = answer_question("When did they marry?", previous_answer=answer_1, previous_graph=context_graph_1, verbose=True, timer=True, show_graph=True)
#answer_1,context_graph_1 = answer_question("Who is the wife of Barack Obama?", verbose=True, timer=True, show_graph=True)
#answer_2,context_graph_2 = answer_question("When did they marry?", previous_answer=answer_1, previous_graph=context_graph_1, verbose=True, timer=True, show_graph=True)
#answer_2,context_graph_2 = answer_question("When did they marry?", previous_answer=answer_1, previous_graph=context_graph_1, verbose=True, timer=True, show_graph=True)
# In[165]:
#conversation_questions = [
# "Who is the wife of Barack Obama?",
# "When did they marry?"
#]
#answer = "Q13133"
#answer_context=[[answer]]
#context_graph=nx.Graph()
#context_graph.add_node(answer, name=get_wd_label(answer), type='entity', turn=1, weight=1, qa=True)
#
#answer_context,context_graph = answer_question(conversation_questions[1] ,previous_answer=answer_context, previous_graph=context_graph,
# verbose=True, timer=True, show_graph=True)
# In[ ]:
#plot_graph(context_graph, "file_name_context_graph", "Context_Graph_title")
# In[163]:
#conversation_questions = [
# "Which actor voiced the Unicorn in The Last Unicorn?",
# "And Alan Arkin was behind..",
# "Who did the score?",
# "So who performed the songs?",
# "Genre of this band's music?",
# "By the way, who was the director?",
#]
conversation_questions = [
"In which year was the theatrical release of the movie Wedding Crashers?",
"And who's the actor that plays Rachel McAdams' dad?",
"Is Isla Fisher in it too?",
"And what's the director's name?",
"Was Dobkin also the director of Shanghai Knights?"
]
def answer_conversation(questions, answer_context=False, context_graph=False, start_from=0, end_at=6):
conversation_history = []
for i_q,question in enumerate(questions):
if i_q >= start_from and i_q<=end_at:
if i_q == 0:
answer_context,context_graph = answer_question(question ,previous_answer=answer_context, previous_graph=context_graph, verbose=True, timer=True, show_graph=True)
#break
elif context_graph:
#break
print("Context Question:",question)
answer_context,context_graph = answer_question(question,previous_answer=answer_context, previous_graph=context_graph, verbose=True, timer=True, show_graph=True, aggressive=False)
else:
print("NO CONTEXT ERROR")
break
conversation_history.append([answer_context,context_graph])
if answer_context: print("Answer:",convert_to_literal(get_wd_label(answer_context[0][0])), "("+str(answer_context[0][0])+")\n")
else: print("Answer:",answer_context)
#break
return conversation_history
#first_answer = conversation_history[0][0]
#first_context = conversation_history[0][1]
#conversation_history_2 = answer_conversation(conversation_questions,answer_context=first_answer,context_graph=first_context, start_from=1, end_at=1)
#second_answer = conversation_history_2[1][0]
#second_context = conversation_history_2[1][1]
#conversation_history_3 = answer_conversation(conversation_questions,answer_context=second_answer,context_graph=second_context, start_from=2, end_at=2)
#third_answer = conversation_history_3[2][0]
#third_context = conversation_history_3[2][1]
#conversation_history_4 = answer_conversation(conversation_questions,answer_context=third_answer,context_graph=third_context, start_from=3, end_at=6)
#conversation_history = answer_conversation(conversation_questions,answer_context=False,context_graph=False, start_from=0, end_at=6)
# In[ ]:
#conversation_history
# In[ ]:
#get_nlp("cast member").similarity(get_nlp("behind"))
#get_wd_label("Q30060419")
# In[ ]:
#for ch in conversation_history:
# if ch[0]:
# print("ch[0]",ch[0][0])
# plot_graph(ch[1], "file_name_context_graph", "Context_Graph_title")
# In[ ]:
def generate_answer_sentence(answer, question, model, tokenizer, verbose=False, k_spo=False, g_autocorrect=True):
if answer:
answer_paths = answer[1:]
answer_best_path = answer[1]
answer_fact = answer[0][0]
#if verbose: print("answer_paths",answer_paths)
#if verbose: print("answer_fact",answer_fact)
if verbose: print("Answer:",get_wd_label(answer_fact), "("+str(answer_fact)+")")
#if verbose: print("Paths:",[[get_wd_label(e) for e in row] for row in answer_paths][0])
if verbose: print("Best path:",[get_wd_label(e) for e in answer_best_path])
if k_spo:
spo_k = k_spo
else:
spo_k = count_hops(answer_best_path,question)
if verbose: print("spo_k",spo_k)
if spo_k>1:
last_element = spo_k*3-1
else:
last_element = spo_k*3
answer_best_path_spos = [get_wd_label(e) for e in answer_best_path][:last_element]
#print("answer_best_path_spos",answer_best_path_spos)
raw_list = []
sentence_spliters = []
for i_e,e in enumerate(answer_best_path_spos):
e = convert_to_literal(e)
if i_e == 0:
raw_list.append('[CLS]')
if is_timestamp(e): raw_list.append('[MASK]')
raw_list.append(e)
raw_list.append('[MASK]')
elif i_e == len(answer_best_path_spos)-1:
if is_timestamp(e): raw_list.append('[MASK]')
raw_list.append(e)
raw_list.append('[MASK]')
raw_list.append('[SEP]')
elif spo_k > 1 and i_e % 3 and i_e != 1:
sentence_spliters.append(len(raw_list)+2)
#raw_list.append('[MASK]')
if is_timestamp(e): raw_list.append('[MASK]')
raw_list.append(e)
raw_list.append('[MASK]')
#raw_list.append('[MASK]')
#raw_list.append('[SEP]')
else:
#print("e",e)
get_nlp(e)
if e.find("is") == -1:
raw_list.append('[MASK]')
raw_list.append(e)
raw_list.append('[MASK]')
#print("answer_best_path_to_complete",answer_best_path_to_complete)
raw_text = " ".join(raw_list)
if verbose: print("Best answer path selected for spo_k",raw_text)
#return "lol"
tokenized_text = tokenizer.tokenize(raw_text)
#print("tokenized_text",tokenized_text)
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
#print("indexed_tokens",indexed_tokens)
segments_ids = [0] * len(tokenized_text)
#print("segments_ids",segments_ids)
if spo_k > 2:
#print("sentence_spliters",sentence_spliters)
previous_position = 0
for i_ss, ss in enumerate(sentence_spliters):
for i_p in range(ss+1, len(segments_ids)):
#print(i_p)
segments_ids[i_p] = i_ss+1
#print(i_ss, ss)
#segments_ids[previous_position:ss] = [i_ss] * int(ss)-int(previous_position)
#previous_position = ss
#print("segments_ids",segments_ids)
else:
segments_ids = [0] * len(tokenized_text)
#print("sentence",segments_ids)
#return False
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
model.eval()
tokens_tensor = tokens_tensor.to(torch.device('cuda:0'))
segments_tensors = segments_tensors.to(torch.device('cuda:0'))
model.to(torch.device('cuda:0'))
masked_indices = [i for i, x in enumerate(tokenized_text) if x == "[MASK]"]# or x == "[CLS]"]
masked_indices_raw = [i for i, x in enumerate(raw_list) if x == "[MASK]"]# or x == "[CLS]"]
with torch.no_grad():
outputs = model(tokens_tensor, token_type_ids=segments_tensors)
predictions = outputs[0]
completed_text = raw_list.copy()
#print("source:", " ".join(completed_text))
for i_mi, masked_index in enumerate(masked_indices):
predicted_index = torch.argmax(predictions[0, masked_index]).item()
predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
completed_text[masked_indices_raw[i_mi]] = predicted_token
completed_text = " ".join(completed_text).replace("[CLS]","").replace("[SEP]","").replace("/","").replace(" .",". ")#[6:-5]
completed_text = re.sub(r'\s*,\s*', ', ', completed_text)
completed_text = re.sub(r'\s*, ,\s*', ', ', completed_text)
completed_text = re.sub(r'\s*\.\s*', '. ', completed_text)
completed_text = re.sub(r'\s*\(\s*', ' (', completed_text)
completed_text = re.sub(r'\s*\)\s*', ') ', completed_text)
completed_text = re.sub(r'\s* ; \s*', '; ', completed_text)
completed_text = re.sub(r'\s* : \s*', ': ', completed_text)
completed_text = re.sub(r'\s* - \s*', ', ', completed_text)
if completed_text[len(completed_text)-2:] == "; ": completed_text = completed_text[:-2]+"."
if completed_text[len(completed_text)-1:] == " ": completed_text = completed_text[:-1]
if completed_text[0] == " ": completed_text = completed_text[1:]
if verbose: print("Spot filled:",completed_text)
if not g_autocorrect:
return [completed_text,completed_text]
corrected_completed_text = corrector.correct(completed_text)
if verbose: print("\n\nBest Answer sentence for spo_k",spo_k,"and corrected:",corrected_completed_text[0]["sequence"])
if corrected_completed_text[0]["sequence"].find("/") != -1:
return [completed_text, corrected_completed_text[0]["sequence"]]
else:
return [corrected_completed_text[0]["sequence"], completed_text]
else:
return False
#print("--> Generating complete sentence from answer...")
#question = "who is the wife of barrack obama"
#question = "which actor voiced the unicorn in the last unicorn?"
#question = "when did the the first star wars released"
#question = "Where did Michelle Obama and Barrack Obama get married?"
#answer = answer_question(question, verbose=True, timer=True, show_graph=True)
#print(answer)
#question = "And Alan Arkin was behind.."
#answer = conversation_history[1][0]
#generate_answer_sentence(answer, question, bert_model, bert_tokenizer, verbose=True, k_spo=False)[0]
# In[ ]:
#import torch.nn.functional as functional
import random
def generate_next_word(input_text, model, tokenizer, threshold=0.7):
input_ids = torch.tensor(tokenizer.encode(input_text)).unsqueeze(0)
outputs = model(input_ids)[0][:, -1]
possibilities = torch.nn.functional.softmax(outputs, dim=-1).squeeze()
indexes = torch.argsort(possibilities, descending=True)
results = []
proba_sum = 0.0
for idx in indexes:
results.append(idx)
proba_sum += possibilities[idx]
if proba_sum > threshold:
pred_idx = indexes.new_tensor([random.choice(results)])
break
predication = tokenizer.convert_ids_to_tokens(int(pred_idx))
return tokenizer.convert_tokens_to_string(predication)
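# Illustrative sketch (assumes gpt2_model and gpt2_tokenizer have been loaded as in the commented block further down):
#next_word = generate_next_word("The unicorn was voiced by", gpt2_model, gpt2_tokenizer, threshold=0.7)
#print(next_word)  # samples one token from the smallest set of top candidates whose probabilities sum past the threshold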
def extend_sentence(sentence, gpt2_model, gpt2_tokenizer, verbose=False, k_sentences=False):
BREAKING_TOKENS = [".","!","?"]
#tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
#model = GPT2LMHeadModel.from_pretrained("gpt2")
#gpt2_modelpath = "gpt2-large"
#gpt2_tokenizer = GPT2Tokenizer.from_pretrained(gpt2_modelpath)
#gpt2_model = GPT2LMHeadModel.from_pretrained(gpt2_modelpath)
sentence += generate_next_word(sentence, gpt2_model, gpt2_tokenizer, threshold=0.7)
done = False
if k_sentences:
k_sentences -= 1
while not done:
next_word = generate_next_word(sentence, gpt2_model, gpt2_tokenizer, threshold=0.7)
if next_word == "\n": continue
#print("next_word",next_word)
sentence += next_word
if verbose: print("\r\t>>> Building sentence: {}".format(sentence), end=' ')
for bt in BREAKING_TOKENS:
if next_word.find(bt) != -1:
#print("bt",bt)
k_sentences -= 1
break
if k_sentences<0:
break
done=True
#print("k_sentences",k_sentences)
#sentence.replace("\\","").replace("\" ","\"").replace("\" \"","\"").replace("\\\'","\'").replace("\\n","").replace("\'","'")
#if sentence.count("\"") == 1: sentence.replace("\"","")
return sentence
#sentence = "Yes, and"
#extend_sentence(sentence, gpt2_model, gpt2_tokenizer, verbose=True, k_sentences=1)
#del gpt2_tokenizer
#del gpt2_model
# In[ ]:
# In[ ]:
#def answer_conversation(questions, answer_context=False, context_graph=False, start_from=0, end_at=6):
# conversation_history = []
# for i_q,question in enumerate(questions):
# if i_q >= start_from and i_q<=end_at:
# if i_q == 0:
# answer_context,context_graph = answer_question(question ,previous_answer=answer_context, previous_graph=context_graph, verbose=True, timer=True, show_graph=True)
# #break
# elif context_graph:
# #break
# print("Context Question:",question)
# answer_context,context_graph = answer_question(question,previous_answer=answer_context, previous_graph=context_graph, verbose=True, timer=True, show_graph=True, aggressive=False)
# else:
# print("NO CONTEXT ERROR")
# break
#
# conversation_history.append([answer_context,context_graph])
# if answer_context: print("Answer:",convert_to_literal(get_wd_label(answer_context[0][0])), "("+str(answer_context[0][0])+")\n")
# #break
# return conversation_history
#bert_modelpath = "bert-large-uncased"
#bert_model = BertForMaskedLM.from_pretrained(bert_modelpath)
#bert_tokenizer = BertTokenizer.from_pretrained(bert_modelpath)
#
#gpt2_modelpath = "gpt2-xl"
#gpt2_tokenizer = GPT2Tokenizer.from_pretrained(gpt2_modelpath)
#gpt2_model = GPT2LMHeadModel.from_pretrained(gpt2_modelpath)
# In[ ]:
def get_full_answer(question, answer_context=False, context_graph=False, verbose=False, timer=False, show_graph=False, k_spo=False, k_chatty=1, g_autocorrect=True):
BREAKING_TOKENS = [".","!","?"]
answer_context,context_graph = answer_question(question,
previous_answer=answer_context, previous_graph=context_graph,
verbose=verbose, timer=timer, show_graph=show_graph, g_autocorrect=g_autocorrect)
if answer_context:
if verbose: print("\nGET_FULL_ANSWER - building sentence from:",answer_context[1])
answer_sentence = generate_answer_sentence(answer_context, question, bert_model, bert_tokenizer, verbose=verbose, k_spo=k_spo, g_autocorrect=g_autocorrect)
if answer_sentence[0][-1] in BREAKING_TOKENS and k_chatty>0:
answer_sentence_no_ending = answer_sentence[0][:-1]
else: answer_sentence_no_ending = answer_sentence[0]
if k_chatty>0:
if verbose: print("\nGET_FULL_ANSWER - extending sentence with:",answer_sentence_no_ending)
answer_sentence_extended = extend_sentence(answer_sentence_no_ending, gpt2_model, gpt2_tokenizer, verbose=verbose, k_sentences=k_chatty)
else: answer_sentence_extended = answer_sentence_no_ending
return answer_sentence_extended, answer_context, context_graph
else:
return "I don't know.", False, False
#question = "Who is the wife of Barack Obama?"
#print("Asking:", question)
#answer = get_full_answer(question, answer_context=False, context_graph=False, verbose=True, timer=True, show_graph=True, k_spo=2, k_chatty=2, g_autocorrect=True)
#print("\n\n\nGraphQA answer:",answer[0])
# In[ ]:
#question_2 = "When did they marry?"
#answer_2 = get_full_answer(question_2, answer_context=answer[1], context_graph=answer[2], verbose=True, timer=True, show_graph=True, k_spo=2, k_chatty=1, g_autocorrect=True)
#answer_sentence_extended, answer_context, context_graph
# In[ ]:
#answer
# In[ ]:
#conversation_history[1][0]
# In[ ]:
questions = [
"Which actor voiced the Unicorn in The Last Unicorn?",
"Who voiced the Unicorn in The Last Unicorn?",
"When was the publication date of the movie Grease?",
"When was produced the first Matrix movie?",
"Which is the nation of Martha Mattox",
"Where did roger marquis die",
"Who is the author that wrote the book Moby Dick",
"Who is the wife of Barack Obama?",
"of what nationality is ken mcgoogan",
"When did the movie Grease come out?",
"which stadium do the wests tigers play in",
"What is the name of the writer of The Secret Garden?",
"whats the name of the organization that was founded by frei otto",
"Which genre of album is harder.....faster?",
"Which genre of album is Harder ... Faster?",
"Which equestrian was is in dublin ?",
"how does engelbert zaschka identify ",
"Who influenced michael mcdowell?",
"what does 2674 pandarus orbit"
]
#for i_q, question in enumerate(questions):
# if i_q >= 0:
# answer,context_graph = answer_question(question, verbose=True, timer=True, show_graph=True)
# if answer:
# print("Answer:",convert_to_literal(get_wd_label(answer[0][0])), "("+str(answer[0][0])+")\n")
# In[ ]:
def get_top_1_answer(answer_convex,question):
time_sensitive = False
if 'when' in [t.lower_ for t in get_nlp(question) if t.tag_=="WRB"]: time_sensitive = True
top_1_answer = answer_convex[0]["answer"]
if time_sensitive:
for answer in answer_convex:
if is_timestamp(answer["answer"]):
top_1_answer = answer["answer"]
break
return top_1_answer
#top_1_answer = get_top_1_answer(answer_convex, question)
#print("top_1_answer",top_1_answer)
# In[ ]:
def standardize_graph(graph):
this_graph = graph.copy()
for n in this_graph.nodes():
n_pos = n.find("-")
n_name = n
if n_pos != -1: n_name = n[:n_pos]
this_graph.nodes[n]["name"] = get_wd_label(n_name)
this_graph.nodes[n]["weight"] = 1
return this_graph
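# Illustrative sketch (assumes a NetworkX graph whose node keys are Wikidata ids, possibly suffixed with "-<n>"):
#std_graph = standardize_graph(context_graph)  # every node gets its human-readable label and a unit weight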
# In[ ]:
### try with Convex
#import convex as cx
conversation_questions = [
"Which actor voice the Unicorn in The Last Unicron?",
"And Alan Arkin was behind..",
"Who did the score?",
"So who performed the songs?",
"Genre of this band's music?",
"By the way, who was the director?"
]
def try_conversation_with_convex(questions, answer_convex=False, context_graph=False):
frontier_detection=[0.9, 0.6, 0.3] #random_access
answer_detection=[0.9, 0.1] #total_distance_qa_nodes, total_distance_frontiers
frontiers=3
for i_q,question in enumerate(questions):
turn = i_q+1
        if i_q >= 0:
if i_q == 0:
                answer_convex, context_graph = answer_question(question, previous_answer=answer_convex, previous_graph=context_graph, verbose=True, timer=True, show_graph=True)
if answer_convex: print("Answer:",convert_to_literal(get_wd_label(answer_convex[0][0])), "("+str(answer_convex[0][0])+")\n")
elif context_graph:
print("Context Question:",question)
answer_convex, context_graph = cx.answer_follow_up_question(question, turn, context_graph, frontier_detection+answer_detection, frontiers)
context_graph = standardize_graph(context_graph)
top_1_answer = get_top_1_answer(answer_convex[1:],question)
plot_graph(context_graph, "file_name_context_graph", "Context_Graph_title")
if top_1_answer: print("Answer:",convert_to_literal(get_wd_label(top_1_answer)), "("+str(top_1_answer)+")\n")
for ac in answer_convex:
print(ac)
print("\n")
else:
print("NO CONTEXT ERROR")
break
#try_conversation_with_convex(conversation_questions, answer_convex=answer, context_graph=context_graph)
# In[ ]:
#import convex as cx
#conversation_questions = [
# "Who is the wife of Barack Obama?",
# "Where did they marry?"
#]
#answer = "Q13133"
#answer_context=[[answer]]
#context_graph=nx.Graph()
#context_graph.add_node(answer, name=get_wd_label(answer), type='entity', turn=1, weight=1, qa=True)
#frontier_detection=[0.9, 0.6, 0.3] #random_access
#answer_detection=[0.9, 0.1] #total_distance_qa_nodes, total_distance_frontiers
#frontiers=3
#turn=2
#question = conversation_questions[1]
#answer_context,context_graph = answer_question(conversation_questions[1] ,previous_answer=answer_context, previous_graph=context_graph,
# verbose=True, timer=True, show_graph=True)
#answer_convex, context_graph = cx.answer_follow_up_question(question, turn, context_graph, frontier_detection+answer_detection, frontiers)
#context_graph = standardize_graph(context_graph)
##top_1_answer = get_top_1_answer(answer_convex[0:],question)
#plot_graph(context_graph, "file_name_context_graph", "Context_Graph_title")
#if top_1_answer: print("Answer:",convert_to_literal(get_wd_label(answer_convex[0]["answer"])), "("+str(answer_convex[0]["answer"])+")\n")
#for ac in answer_convex:
# print(ac)
#print("\n")
#
# In[ ]:
# TODO try decomposing the question into subquestions (multi-hop into single-hop SPO)
#test_nlp_1 = get_nlp("What is the name of Metallica's first album")
#test_nlp_2 = get_nlp("What year did the TV show Arrested Development first air?")
#test_nlp_3 = get_nlp("How many Back to the Future movie are there?")
#test_nlp_4 = get_nlp("what is the name of an episode of life on a stick")
#test_nlp_5 = get_nlp("Which album is a death metal album?")
#test_nlp_6 = get_nlp("Star wars: the first order, the movie.")
#test_nlp_7 = get_nlp("What is the first star wars movie?")
#
#def is_reasoning_question(nlp_question):
# print(nlp_question)
# for e in nlp_question:
# print(e,e.tag_,e.pos_,list(e.children))
# print("\n")
#
#is_reasoning_question(test_nlp_1)
#is_reasoning_question(test_nlp_2)
#is_reasoning_question(test_nlp_3)
#is_reasoning_question(test_nlp_4)
#is_reasoning_question(test_nlp_5)
#is_reasoning_question(test_nlp_7)
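#
# A rough, hedged sketch of the decomposition idea in the TODO above: split a
# multi-hop question at relative/adverbial clause boundaries so each piece can be
# answered as a single-hop SPO. Heuristic only; get_nlp is the spaCy pipeline used
# in this notebook, everything else here is illustrative.
#def split_into_single_hops(question):
#    doc = get_nlp(question)
#    hops, current = [], []
#    for token in doc:
#        # a relative or adverbial clause head usually opens a new hop
#        if token.dep_ in ("relcl", "advcl") and current:
#            hops.append(" ".join(current))
#            current = []
#        current.append(token.text)
#    if current: hops.append(" ".join(current))
#    return hops
#
#split_into_single_hops("What year did the TV show Arrested Development first air?")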
|
pytest_pyramid_server.py
|
'''
Created on 25 Apr 2012
@author: eeaston
'''
import os
from six.moves import configparser
import sys
import socket
import glob
import shutil
import threading
try:
from path import Path
except ImportError:
from path import path as Path
from wsgiref.simple_server import make_server
from paste.deploy.loadwsgi import loadapp
from pytest import yield_fixture
from pytest_server_fixtures import CONFIG
from pytest_server_fixtures.http import HTTPTestServer
class ConfigNotFoundError(Exception):
"""Raised when a given config file and path is not found."""
@yield_fixture(scope='session')
def pyramid_server(request):
""" Session-scoped Pyramid server run in a subprocess, out of a temp dir.
This is a 'real' server that you can point a Selenium webdriver at.
    This fixture searches the current working directory for its configuration
    file, 'testing.ini'. All .ini files in the cwd will be copied to the tempdir
so that config chaining still works.
The fixture implementation in `PyramidTestServer` has more flexible configuration
    options; use it directly to define more fine-grained fixtures.
Methods
-------
get_config() : Return current configuration as a dict.
get() : Query url relative to the server root.
.. Retry failures by default.
post() : Post payload to url relative to the server root.
.. Retry failures by default.
Attributes
----------
working_config (`path.path`): Path to the config file used by the server at runtime
.. also inherits all attributes from the `workspace` fixture
"""
with PyramidTestServer() as server:
server.start()
yield server
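# A hedged usage sketch for the fixture above (the route and assertions are
# illustrative and depend on the app under test, not on this module):
#
#   def test_pyramid_server_is_configured(pyramid_server):
#       config = pyramid_server.get_config()
#       assert 'app:main' in config
#       response = pyramid_server.get('/')
#       assert response.status_code == 200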
class PyramidTestServer(HTTPTestServer):
port_seed = 65532
def __init__(self, config_dir=None, config_filename=None, extra_config_vars=None, **kwargs):
""" Test server for a Pyramid project
Parameters
----------
config_dir:
Path to a directory to find the config file/s. Defaults to current working dir, and
all .ini files in the directory will be made available for config file chaining.
config_filename:
Name of the main config file to use. Defaults to testing.ini.
extra_config_vars:
Dict of any extra entries to add to the file, as { section: { key: value } }
"""
self.extra_config_vars = extra_config_vars if extra_config_vars is not None else {}
self.config_dir = config_dir if config_dir is not None else os.getcwd()
self.config_filename = config_filename if config_filename else 'testing.ini'
self.working_config = None
self.original_config = Path(self.config_dir) / self.config_filename
# Always print debug output for this process
os.environ['DEBUG'] = '1'
kwargs['hostname'] = kwargs.get('hostname', CONFIG.fixture_hostname)
super(PyramidTestServer, self).__init__(preserve_sys_path=True, **kwargs)
def pre_setup(self):
""" Make a copy of at the ini files and set the port number and host in the new testing.ini
"""
self.working_config = self.workspace / self.config_filename
# We need the other ini files as well here as they may be chained
for filename in glob.glob(os.path.join(self.config_dir, '*.ini')):
shutil.copy(filename, self.workspace)
Path.copy(self.original_config, self.working_config)
parser = configparser.ConfigParser()
parser.read(self.original_config)
parser.set('server:main', 'port', str(self.port))
parser.set('server:main', 'host', self.hostname)
        for section, cfg in self.extra_config_vars.items():
            for key, value in cfg.items():
                parser.set(section, key, value)
with open(str(self.working_config), 'w') as fp:
parser.write(fp)
try:
parser.get('app:main', 'url_prefix')
except configparser.NoOptionError:
parser.set('app:main', 'url_prefix', '')
# Set the uri to be the hostname and the url prefix
self._uri = "http://%s:%s/%s" % (self.hostname, self.port, parser.get('app:main', 'url_prefix'))
@property
def run_cmd(self):
return [sys.executable, '-c', 'import sys; from pyramid.scripts.pserve import main; sys.exit(main())', self.working_config]
def get_config(self):
""" Convenience method to return our currently running config file as
an items dictionary, skipping logging sections
"""
# Use our workspace for %(here) expansion
parser = configparser.ConfigParser({'here': self.workspace})
parser.read(self.config)
return dict([(section, dict(parser.items(section)))
for section in parser.sections()
if not section.startswith('logger')
and not section.startswith('formatter')
and not section.startswith('handler')])
class InlinePyramidTestServer(PyramidTestServer):
random_port = True
port_seed = None
def start_server(self, env=None):
""" Start the server instance.
"""
print('\n==================================================================================')
print("Starting wsgiref pyramid test server on host %s port %s" % (self.hostname, self.port))
wsgi_app = loadapp('config:' + self.working_config)
self.server = make_server(self.hostname, self.port, wsgi_app)
worker = threading.Thread(target=self.server.serve_forever)
worker.daemon = True
worker.start()
self.wait_for_go()
print("Server now awake")
print('==================================================================================')
def kill(self):
if self.server:
print('\n==================================================================================')
print("Stopping wsgiref pyramid test server on host %s port %s" % (self.hostname, self.port))
print('==================================================================================')
self.server.shutdown()
self.server = None
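# A hedged sketch of driving the in-process variant directly, mirroring how the
# session fixture uses PyramidTestServer above (the client code is illustrative):
#
#   with InlinePyramidTestServer() as server:
#       server.start()
#       ...  # point an HTTP client or webdriver at the server's uri
#   # exiting the block is expected to tear down the temp workspace and stop the server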
|
googlenet_processor.py
|
#! /usr/bin/env python3
# Copyright(c) 2017 Intel Corporation.
# License: MIT See LICENSE file in root directory.
# NPS
# processes images via googlenet
from mvnc import mvncapi as mvnc
import numpy as np
import cv2
import queue
import threading
class googlenet_processor:
# GoogLeNet assumes input images are these dimensions
GN_NETWORK_IMAGE_WIDTH = 224
GN_NETWORK_IMAGE_HEIGHT = 224
EXAMPLES_BASE_DIR = '../../'
ILSVRC_2012_dir = EXAMPLES_BASE_DIR + 'data/ilsvrc12/'
MEAN_FILE_NAME = ILSVRC_2012_dir + 'ilsvrc_2012_mean.npy'
LABELS_FILE_NAME = ILSVRC_2012_dir + 'synset_words.txt'
# initialize the class instance
# googlenet_graph_file is the path and filename of the googlenet graph file
# produced via the ncsdk compiler.
# ncs_device is an open Device instance from the ncsdk
# input_queue is a queue instance from which images will be pulled that are
# in turn processed (inferences are run on) via the NCS device
# each item on the queue should be an opencv image. it will be resized
# as needed for the network
# output_queue is a queue object on which the results of the inferences will be placed.
# For each inference a list of the following items will be placed on the output_queue:
# index of the most likely classification from the inference.
# label for the most likely classification from the inference.
    #     probability of the most likely classification from the inference.
def __init__(self, googlenet_graph_file: str, ncs_device: mvnc.Device, input_queue: queue.Queue, output_queue:queue.Queue,
queue_wait_input: float, queue_wait_output:float):
self._queue_wait_input = queue_wait_input
self._queue_wait_output = queue_wait_output
# GoogLenet initialization
# googlenet mean values will be read in from .npy file
self._gn_mean = [0., 0., 0.]
# labels to display along with boxes if googlenet classification is good
# these will be read in from the synset_words.txt file for ilsvrc12
self._gn_labels = [""]
# loading the means from file
try:
self._gn_mean = np.load(googlenet_processor.MEAN_FILE_NAME).mean(1).mean(1)
except:
print('\n\n')
print('Error - could not load means from ' + googlenet_processor.MEAN_FILE_NAME)
print('\n\n')
raise
# loading the labels from file
try:
self._gn_labels = np.loadtxt(googlenet_processor.LABELS_FILE_NAME, str, delimiter='\t')
for label_index in range(0, len(self._gn_labels)):
temp = self._gn_labels[label_index].split(',')[0].split(' ', 1)[1]
self._gn_labels[label_index] = temp
except:
print('\n\n')
print('Error - could not read labels from: ' + googlenet_processor.LABELS_FILE_NAME)
print('\n\n')
raise
# Load googlenet graph from disk and allocate graph via API
try:
with open(googlenet_graph_file, mode='rb') as gn_file:
gn_graph_from_disk = gn_file.read()
self._gn_graph = mvnc.Graph("GoogLeNet Graph")
self._fifo_in, self._fifo_out = self._gn_graph.allocate_with_fifos(ncs_device, gn_graph_from_disk)
except Exception as caught_except:
print(caught_except)
print('\n\n')
print('Error - could not load googlenet graph file: ' + googlenet_graph_file)
print('\n\n')
raise
self._input_queue = input_queue
self._output_queue = output_queue
self._worker_thread = threading.Thread(target=self._do_work, args=())
# call one time when the instance will no longer be used.
def cleanup(self):
self._fifo_in.destroy()
self._fifo_out.destroy()
self._gn_graph.destroy()
# start asynchronous processing on a worker thread that will pull images off the input queue and
    # place results on the output queue
def start_processing(self):
self._end_flag = False
        if self._worker_thread is None:
self._worker_thread = threading.Thread(target=self._do_work, args=())
self._worker_thread.start()
# stop asynchronous processing of the worker thread.
# when returns the worker thread will have terminated.
def stop_processing(self):
self._end_flag = True
self._worker_thread.join()
self._worker_thread = None
# the worker thread function. called when start_processing is called and
# returns when stop_processing is called.
def _do_work(self):
print('in googlenet_processor worker thread')
while (not self._end_flag):
try:
input_image = self._input_queue.get(True, self._queue_wait_input)
index, label, probability = self.googlenet_inference(input_image, "NPS")
self._output_queue.put((index, label, probability), True, self._queue_wait_output)
self._input_queue.task_done()
except queue.Empty:
print('googlenet processor: No more images in queue.')
except queue.Full:
print('googlenet processor: queue full')
print('exiting googlenet_processor worker thread')
# Executes an inference using the googlenet graph and image passed
    # the googlenet graph allocated in __init__ (from the googlenet graph
    # file on an open NCS device) is used to run the inference.
# input_image is the image on which a googlenet inference should be
# executed. It will be resized to match googlenet image size requirements
# and also converted to float32.
# returns a list of the following three items
# index of the most likely classification from the inference.
# label for the most likely classification from the inference.
    #   probability of the most likely classification from the inference.
def googlenet_inference(self, input_image:np.ndarray, user_obj):
        # Resize image to googlenet network width and height, convert it
        # to float32, and subtract the per-channel means before queuing it
        # for an inference on the NCS device
        input_image = cv2.resize(input_image,
                                 (googlenet_processor.GN_NETWORK_IMAGE_WIDTH,
                                  googlenet_processor.GN_NETWORK_IMAGE_HEIGHT),
                                 interpolation=cv2.INTER_LINEAR)
input_image = input_image.astype(np.float32)
input_image[:, :, 0] = (input_image[:, :, 0] - self._gn_mean[0])
input_image[:, :, 1] = (input_image[:, :, 1] - self._gn_mean[1])
input_image[:, :, 2] = (input_image[:, :, 2] - self._gn_mean[2])
# Load tensor and get result. This executes the inference on the NCS
self._gn_graph.queue_inference_with_fifo_elem(self._fifo_in, self._fifo_out, input_image.astype(np.float32), None)
output, userobj = self._fifo_out.read_elem()
order = output.argsort()[::-1][:1]
'''
print('\n------- prediction --------')
for i in range(0, 5):
print('prediction ' + str(i) + ' (probability ' + str(output[order[i]]) + ') is ' + self._gn_labels[
order[i]] + ' label index is: ' + str(order[i]))
'''
# index, label, probability
return order[0], self._gn_labels[order[0]], output[order[0]]
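# A hedged usage sketch (assumes an opened mvnc Device and a graph file compiled
# with the ncsdk; file names and wait times below are illustrative):
#
#   in_queue = queue.Queue()
#   out_queue = queue.Queue()
#   processor = googlenet_processor('googlenet.graph', ncs_device, in_queue, out_queue,
#                                   queue_wait_input=10.0, queue_wait_output=10.0)
#   processor.start_processing()
#   in_queue.put(cv2.imread('example.jpg'))
#   index, label, probability = out_queue.get()
#   processor.stop_processing()
#   processor.cleanup()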
|
scheduler_job.py
|
# pylint: disable=no-name-in-module
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import logging
import multiprocessing
import os
import sched
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Any, Callable, DefaultDict, Dict, Iterable, List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import load_only, selectinload
from sqlalchemy.orm.session import Session, make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.executors.executor_loader import UNPICKLEABLE_EXECUTORS
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.stats import Stats
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import timezone
from airflow.utils.callback_requests import (
CallbackRequest,
DagCallbackRequest,
SlaCallbackRequest,
TaskCallbackRequest,
)
from airflow.utils.dag_processing import AbstractDagFileProcessorProcess, DagFileProcessorAgent
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import is_lock_not_available_error, prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin, MultiprocessingStartMethodMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(
self,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
callback_requests: List[CallbackRequest],
):
super().__init__()
self._file_path = file_path
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._callback_requests = callback_requests
        # The process that was launched to process the given file.
self._process: Optional[multiprocessing.process.BaseProcess] = None
# The result of DagFileProcessor.process_file(file_path).
self._result: Optional[Tuple[int, int]] = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time: Optional[datetime.datetime] = None
        # This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
self._parent_channel: Optional[MultiprocessingConnection] = None
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self) -> str:
return self._file_path
@staticmethod
def _run_file_processor(
result_channel: MultiprocessingConnection,
parent_channel: MultiprocessingConnection,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
thread_name: str,
callback_requests: List[CallbackRequest],
) -> None:
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param parent_channel: the parent end of the channel to close in the child
:type parent_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_ids: if specified, only examine DAG ID's that are
in this list
:type dag_ids: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log: logging.Logger = logging.getLogger("airflow.processor")
# Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
# the child, else it won't get closed properly until we exit.
log.info("Closing parent pipe")
parent_channel.close()
del parent_channel
set_context(log, file_path)
setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")
try:
# redirect stdout/stderr to log
with redirect_stdout(StreamLogWriter(log, logging.INFO)), redirect_stderr(
StreamLogWriter(log, logging.WARN)
), Stats.timer() as timer:
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
result: Tuple[int, int] = dag_file_processor.process_file(
file_path=file_path,
pickle_dags=pickle_dags,
callback_requests=callback_requests,
)
result_channel.send(result)
log.info("Processing %s took %.3f seconds", file_path, timer.duration)
except Exception: # pylint: disable=broad-except
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
result_channel.close()
def start(self) -> None:
"""Launch the process and start processing the DAG."""
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
_parent_channel, _child_channel = context.Pipe(duplex=False)
process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
_parent_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
f"DagFileProcessor{self._instance_id}",
self._callback_requests,
),
name=f"DagFileProcessor{self._instance_id}-Process",
)
self._process = process
self._start_time = timezone.utcnow()
process.start()
# Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
# from closing in some cases
_child_channel.close()
del _child_channel
# Don't store it on self until after we've started the child process - we don't want to keep it from
# getting GCd/closed
self._parent_channel = _parent_channel
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
self._kill_process()
def terminate(self, sigkill: bool = False) -> None:
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
with suppress(TimeoutError):
self._process._popen.wait(5) # type: ignore # pylint: disable=protected-access
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self) -> None:
if self._process is None:
raise AirflowException("Tried to kill process before starting!")
if self._process.is_alive() and self._process.pid:
self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
if self._parent_channel:
self._parent_channel.close()
@property
def pid(self) -> int:
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None or self._process.pid is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get exit code before starting!")
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
# If we get an EOFError, it means the child end of the pipe has been closed. This only happens
# in the finally block. But due to a possible race condition, the process may have not yet
# terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
# "suitable" timeout.
self._done = True
# Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
self._process.join(timeout=5)
if self._process.is_alive():
# Didn't shut down cleanly - kill it
self._kill_process()
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self) -> Optional[Tuple[int, int]]:
"""
:return: result of running DagFileProcessor.process_file()
:rtype: tuple[int, int] or None
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self) -> datetime.datetime:
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
@property
def waitable_handle(self):
return self._process.sentinel
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to DagFileProcessor.process_file
    3. Serialize the DAGs and save them to the DB (or update the existing record in the DB).
4. Pickle the DAG and save it to the DB (if necessary).
5. Record any errors importing the file into ORM
Returns a tuple of 'number of dags found' and 'the count of import errors'
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param log: Logger to save the processing process
:type log: logging.Logger
"""
UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')
def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
super().__init__()
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag: DAG, session: Session = None) -> None:
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
qry = (
session.query(TI.task_id, func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(TI.state == State.SUCCESS, TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id)
.subquery('sq')
)
max_tis: List[TI] = (
session.query(TI)
.filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
TI.execution_date == qry.c.max_ti,
)
.all()
)
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
if not isinstance(task.sla, timedelta):
continue
dttm = dag.following_schedule(ti.execution_date)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(
SlaMiss(task_id=ti.task_id, dag_id=ti.dag_id, execution_date=dttm, timestamp=ts)
)
dttm = dag.following_schedule(dttm)
session.commit()
# pylint: disable=singleton-comparison
slas: List[SlaMiss] = (
session.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa
.all()
)
# pylint: enable=singleton-comparison
if slas: # pylint: disable=too-many-nested-blocks
sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
fetched_tis: List[TI] = (
session.query(TI)
.filter(TI.state != State.SUCCESS, TI.execution_date.in_(sla_dates), TI.dag_id == dag.dag_id)
.all()
)
blocking_tis: List[TI] = []
for ti in fetched_tis:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([sla.task_id + ' on ' + sla.execution_date.isoformat() for sla in slas])
blocking_task_list = "\n".join(
[ti.task_id + ' on ' + ti.execution_date.isoformat() for ti in blocking_tis]
)
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info('Calling SLA miss callback')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
except Exception: # pylint: disable=broad-except
self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id)
email_content = f"""\
Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n</code></pre>
Blocking tasks:
            <pre><code>{blocking_task_list}</code></pre>
Airflow Webserver URL: {conf.get(section='webserver', key='base_url')}
"""
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.", sla.task_id
)
continue
tasks_missed_sla.append(task)
emails: Set[str] = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(emails, f"[airflow] SLA miss on DAG={dag.dag_id}", email_content)
email_sent = True
notification_sent = True
except Exception: # pylint: disable=broad-except
Stats.incr('sla_email_notification_failure')
self.log.exception("Could not send SLA Miss email notification for DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
sla.email_sent = email_sent
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session: Session, dagbag: DagBag) -> None:
"""
        For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(errors.ImportError.filename == dagbag_file).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(
errors.ImportError(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace)
)
session.commit()
@provide_session
def execute_callbacks(
self, dagbag: DagBag, callback_requests: List[CallbackRequest], session: Session = None
) -> None:
"""
Execute on failure callbacks. These objects can come from SchedulerJob or from
DagFileProcessorManager.
:param dagbag: Dag Bag of dags
:param callback_requests: failure callbacks to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:param session: DB session.
"""
for request in callback_requests:
try:
if isinstance(request, TaskCallbackRequest):
self._execute_task_callbacks(dagbag, request)
elif isinstance(request, SlaCallbackRequest):
self.manage_slas(dagbag.dags.get(request.dag_id))
elif isinstance(request, DagCallbackRequest):
self._execute_dag_callbacks(dagbag, request, session)
except Exception: # pylint: disable=broad-except
self.log.exception(
"Error executing %s callback for file: %s",
request.__class__.__name__,
request.full_filepath,
)
session.commit()
@provide_session
def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
dag = dagbag.dags[request.dag_id]
dag_run = dag.get_dagrun(execution_date=request.execution_date, session=session)
dag.handle_callback(
dagrun=dag_run, success=not request.is_failure_callback, reason=request.msg, session=session
)
def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
simple_ti = request.simple_task_instance
if simple_ti.dag_id in dagbag.dags:
dag = dagbag.dags[simple_ti.dag_id]
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
ti = TI(task, simple_ti.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = simple_ti.start_date
ti.end_date = simple_ti.end_date
ti.try_number = simple_ti.try_number
ti.state = simple_ti.state
ti.test_mode = self.UNIT_TEST_MODE
if request.is_failure_callback:
ti.handle_failure_with_callback(error=request.msg, test_mode=ti.test_mode)
self.log.info('Executed failure callback for %s in state %s', ti, ti.state)
@provide_session
def process_file(
self,
file_path: str,
callback_requests: List[CallbackRequest],
pickle_dags: bool = False,
session: Session = None,
) -> Tuple[int, int]:
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to this method.
        3. Serialize the DAGs and save them to the DB (or update the existing record in the DB).
4. Pickle the DAG and save it to the DB (if necessary).
5. Record any errors importing the file into ORM
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.dag_processing.CallbackRequest]
        :param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:param session: Sqlalchemy ORM Session
:type session: Session
:return: number of dags found, count of import errors
:rtype: Tuple[int, int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
try:
dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
except Exception: # pylint: disable=broad-except
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return 0, 0
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return 0, len(dagbag.import_errors)
self.execute_callbacks(dagbag, callback_requests)
# Save individual DAGs in the ORM
dagbag.sync_to_db()
if pickle_dags:
paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
unpaused_dags: List[DAG] = [
dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
]
for dag in unpaused_dags:
dag.pickle(session)
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception: # pylint: disable=broad-except
self.log.exception("Error logging import errors!")
return len(dagbag.dags), len(dagbag.import_errors)
class SchedulerJob(BaseJob): # pylint: disable=too-many-instance-attributes
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: str
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to run the scheduling loop. If you
have a large number of DAG files this could complete before each file
has been parsed. -1 for unlimited times.
:type num_runs: int
:param num_times_parse_dags: The number of times to try to parse each DAG file.
-1 for unlimited times.
:type num_times_parse_dags: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {'polymorphic_identity': 'SchedulerJob'}
heartrate: int = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
subdir: str = settings.DAGS_FOLDER,
num_runs: int = conf.getint('scheduler', 'num_runs'),
num_times_parse_dags: int = -1,
processor_poll_interval: float = conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle: bool = False,
log: Any = None,
*args,
**kwargs,
):
self.subdir = subdir
self.num_runs = num_runs
# In specific tests, we want to stop the parse loop after the _files_ have been parsed a certain
# number of times. This is only to support testing, and isn't something a user is likely to want to
# configure -- they'll want num_runs
self.num_times_parse_dags = num_times_parse_dags
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
if log:
self._log = log
# Check what SQL backend we use
sql_conn: str = conf.get('core', 'sql_alchemy_conn').lower()
self.using_sqlite = sql_conn.startswith('sqlite')
self.using_mysql = sql_conn.startswith('mysql')
self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent: Optional[DagFileProcessorAgent] = None
self.dagbag = DagBag(dag_folder=self.subdir, read_dags_from_db=True)
def register_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
signal.signal(signal.SIGUSR2, self._debug_dump)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def _debug_dump(self, signum, frame): # pylint: disable=unused-argument
try:
sig_name = signal.Signals(signum).name # pylint: disable=no-member
except Exception: # pylint: disable=broad-except
sig_name = str(signum)
self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
self.executor.debug_dump()
self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
        We define alive as being in the running state with a heartbeat within the
        threshold defined in the ``scheduler_health_check_threshold`` config
        setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
@provide_session
def _change_state_for_tis_without_dagrun(
self, old_states: List[str], new_state: str, session: Session = None
) -> None:
"""
For all DAG IDs in the DagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns are
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
"""
tis_changed = 0
query = (
session.query(models.TaskInstance)
.outerjoin(models.TaskInstance.dag_run)
.filter(models.TaskInstance.dag_id.in_(list(self.dagbag.dag_ids)))
.filter(models.TaskInstance.state.in_(old_states))
.filter(
or_(
# pylint: disable=comparison-with-callable
models.DagRun.state != State.RUNNING,
# pylint: disable=no-member
models.DagRun.state.is_(None),
)
)
)
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change: List[TI] = with_row_locks(query, of=TI, **skip_locked(session=session)).all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
current_time = timezone.utcnow()
ti_prop_update = {
models.TaskInstance.state: new_state,
models.TaskInstance.start_date: current_time,
}
# Only add end_date and duration if the new_state is 'success', 'failed' or 'skipped'
if new_state in State.finished:
ti_prop_update.update(
{
models.TaskInstance.end_date: current_time,
models.TaskInstance.duration: 0,
}
)
tis_changed = (
session.query(models.TaskInstance)
.filter(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date == subq.c.execution_date,
)
.update(ti_prop_update, synchronize_session=False)
)
if tis_changed > 0:
session.flush()
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed,
new_state,
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(
self, states: List[str], session: Session = None
) -> Tuple[DefaultDict[str, int], DefaultDict[Tuple[str, str], int]]:
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from (dag_id, task_id) to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
ti_concurrency_query: List[Tuple[str, str, int]] = (
session.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map: DefaultDict[str, int] = defaultdict(int)
task_map: DefaultDict[Tuple[str, str], int] = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
# pylint: disable=too-many-locals,too-many-statements
@provide_session
def _executable_task_instances_to_queued(self, max_tis: int, session: Session = None) -> List[TI]:
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param max_tis: Maximum number of TIs to queue in this loop.
:type max_tis: int
:return: list[airflow.models.TaskInstance]
"""
executable_tis: List[TI] = []
# Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
# Throws an exception if lock cannot be obtained, rather than blocking
pools = models.Pool.slots_stats(lock_rows=True, session=session)
# If the pools are full, there is no point doing anything!
# If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
pool_slots_free = max(0, sum(pool['open'] for pool in pools.values()))
if pool_slots_free == 0:
self.log.debug("All pools are full!")
return executable_tis
max_tis = min(max_tis, pool_slots_free)
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
query = (
session.query(TI)
.outerjoin(TI.dag_run)
.filter(or_(DR.run_id.is_(None), DR.run_type != DagRunType.BACKFILL_JOB))
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload('dag_model'))
.limit(max_tis)
)
task_instances_to_examine: List[TI] = with_row_locks(
query,
of=TI,
**skip_locked(session=session),
).all()
# TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
# Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join([repr(x) for x in task_instances_to_examine])
self.log.info("%s tasks up for execution:\n\t%s", len(task_instances_to_examine), task_instance_str)
pool_to_task_instances: DefaultDict[str, List[models.Pool]] = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map: DefaultDict[str, int]
task_concurrency_map: DefaultDict[Tuple[str, str], int]
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=list(EXECUTION_STATES), session=session
)
num_tasks_in_executor = 0
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks_total = 0
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
# pylint: disable=too-many-nested-blocks
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning("Tasks using non-existent pool '%s' will not be scheduled", pool)
continue
open_slots = pools[pool]["open"]
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool,
open_slots,
num_ready,
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date)
)
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info("Not scheduling since there are %s open slots in pool %s", open_slots, pool)
# Can't schedule any more since there are no more open slots.
num_unhandled = len(priority_sorted_task_instances) - current_index
num_starving_tasks += num_unhandled
num_starving_tasks_total += num_unhandled
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = task_instance.dag_model.concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id,
current_dag_concurrency,
dag_concurrency_limit,
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance,
dag_id,
dag_concurrency_limit,
)
continue
task_concurrency_limit: Optional[int] = None
if task_instance.dag_model.has_task_concurrency_limits:
                    # Many dags don't have a task_concurrency, so it is better to avoid
                    # loading the full serialized DAG where we can.
serialized_dag = self.dagbag.get_dag(dag_id, session=session)
if serialized_dag.has_task(task_instance.task_id):
task_concurrency_limit = serialized_dag.get_task(
task_instance.task_id
).task_concurrency
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info(
"Not executing %s since the task concurrency for"
" this task has been reached.",
task_instance,
)
continue
if task_instance.pool_slots > open_slots:
self.log.info(
"Not executing %s since it requires %s slots "
"but there are %s open slots in the pool %s.",
task_instance,
task_instance.pool_slots,
open_slots,
pool,
)
num_starving_tasks += 1
num_starving_tasks_total += 1
# Though we can execute tasks with lower priority if there's enough room
continue
executable_tis.append(task_instance)
open_slots -= task_instance.pool_slots
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join([repr(x) for x in executable_tis])
self.log.info("Setting the following tasks to queued state:\n\t%s", task_instance_str)
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(executable_tis)
session.query(TI).filter(filter_for_tis).update(
# TODO[ha]: should we use func.now()? How does that work with DB timezone on mysql when it's not
# UTC?
{TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow(), TI.queued_by_job_id: self.id},
synchronize_session=False,
)
for ti in executable_tis:
make_transient(ti)
return executable_tis
def _enqueue_task_instances_with_queued_state(self, task_instances: List[TI]) -> None:
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param task_instances: TaskInstances to enqueue
:type task_instances: list[TaskInstance]
"""
# actually enqueue them
for ti in task_instances:
command = TI.generate_command(
ti.dag_id,
ti.task_id,
ti.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=ti.pool,
file_path=ti.dag_model.fileloc,
pickle_id=ti.dag_model.pickle_id,
)
priority = ti.priority_weight
queue = ti.queue
self.log.info("Sending %s to executor with priority %s and queue %s", ti.key, priority, queue)
self.executor.queue_command(
ti,
command,
priority=priority,
queue=queue,
)
def _critical_section_execute_task_instances(self, session: Session) -> int:
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
           and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
HA note: This function is a "critical section" meaning that only a single executor process can execute
this function at the same time. This is achieved by doing ``SELECT ... from pool FOR UPDATE``. For DBs
that support NOWAIT, a "blocked" scheduler will skip this and continue on with other tasks (creating
new DAG runs, progressing TIs from None to SCHEDULED etc.); DBs that don't support this (such as
MariaDB or MySQL 5.x) the other schedulers will wait for the lock before continuing.
:param session:
:type session: sqlalchemy.orm.Session
        :return: Number of task instances with state changed.
"""
if self.max_tis_per_query == 0:
max_tis = self.executor.slots_available
else:
max_tis = min(self.max_tis_per_query, self.executor.slots_available)
queued_tis = self._executable_task_instances_to_queued(max_tis, session=session)
self._enqueue_task_instances_with_queued_state(queued_tis)
return len(queued_tis)
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session: Session = None):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if not self.executor.queued_tasks:
return
filter_for_ti_state_change = [
and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# The TI.try_number will return raw try_number+1 since the
# ti is not running. And we need to -1 to match the DB record.
TI._try_number == try_number - 1, # pylint: disable=protected-access
TI.state == State.QUEUED,
)
for dag_id, task_id, execution_date, try_number in self.executor.queued_tasks.keys()
]
ti_query = session.query(TI).filter(or_(*filter_for_ti_state_change))
tis_to_set_to_scheduled: List[TI] = with_row_locks(ti_query).all()
if not tis_to_set_to_scheduled:
return
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
)
for task_instance in tis_to_set_to_scheduled:
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(repr(x) for x in tis_to_set_to_scheduled)
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, session: Session = None) -> int:
"""Respond to executor events."""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
ti_primary_key_to_try_number_map: Dict[Tuple[str, str, datetime.datetime], int] = {}
event_buffer = self.executor.get_event_buffer()
tis_with_right_state: List[TaskInstanceKey] = []
# Report execution
for ti_key, value in event_buffer.items():
state: str
state, _ = value
# We create map (dag_id, task_id, execution_date) -> in-memory try_number
ti_primary_key_to_try_number_map[ti_key.primary] = ti_key.try_number
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
ti_key.dag_id,
ti_key.task_id,
ti_key.execution_date,
state,
ti_key.try_number,
)
if state in (State.FAILED, State.SUCCESS, State.QUEUED):
tis_with_right_state.append(ti_key)
# Return if no finished tasks
if not tis_with_right_state:
return len(event_buffer)
# Check state of finished tasks
filter_for_tis = TI.filter_for_tis(tis_with_right_state)
tis: List[TI] = session.query(TI).filter(filter_for_tis).options(selectinload('dag_model')).all()
for ti in tis:
try_number = ti_primary_key_to_try_number_map[ti.key.primary]
buffer_key = ti.key.with_try_number(try_number)
state, info = event_buffer.pop(buffer_key)
# TODO: should we fail RUNNING as well, as we do in Backfills?
if state == State.QUEUED:
ti.external_executor_id = info
self.log.info("Setting external_id for %s to %s", ti, info)
continue
if ti.try_number == buffer_key.try_number and ti.state == State.QUEUED:
Stats.incr('scheduler.tasks.killed_externally')
msg = (
"Executor reports task instance %s finished (%s) although the "
"task says its %s. (Info: %s) Was the task killed externally?"
)
self.log.error(msg, ti, state, ti.state, info)
request = TaskCallbackRequest(
full_filepath=ti.dag_model.fileloc,
simple_task_instance=SimpleTaskInstance(ti),
msg=msg % (ti, state, ti.state, info),
)
self.processor_agent.send_callback_to_execute(request)
return len(event_buffer)
def _execute(self) -> None:
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
self.log.info("Processing each file at most %s times", self.num_times_parse_dags)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(
dag_directory=self.subdir,
max_runs=self.num_times_parse_dags,
processor_factory=type(self)._create_dag_file_processor,
processor_timeout=processor_timeout,
dag_ids=[],
pickle_dags=pickle_dags,
async_mode=async_mode,
)
try:
self.executor.job_id = self.id
self.executor.start()
self.register_signals()
self.processor_agent.start()
execute_start_time = timezone.utcnow()
self._run_scheduler_loop()
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s", execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove() # type: ignore
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing SchedulerJob._run_scheduler_loop")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(
file_path: str,
callback_requests: List[CallbackRequest],
dag_ids: Optional[List[str]],
pickle_dags: bool,
) -> DagFileProcessorProcess:
"""Creates DagFileProcessorProcess instance."""
return DagFileProcessorProcess(
file_path=file_path, pickle_dags=pickle_dags, dag_ids=dag_ids, callback_requests=callback_requests
)
def _run_scheduler_loop(self) -> None:
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/apache-airflow/img/scheduler_loop.jpg
:rtype: None
"""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
is_unit_test: bool = conf.getboolean('core', 'unit_test_mode')
timers = sched.scheduler()
def call_regular_interval(
delay: float,
action: Callable,
arguments=(),
kwargs={},
): # pylint: disable=dangerous-default-value
def repeat(*args, **kwargs):
action(*args, **kwargs)
# This is not perfect. If we want a timer every 60s, but action
# takes 10s to run, this will run it every 70s.
# Good enough for now
timers.enter(delay, 1, repeat, args, kwargs)
timers.enter(delay, 1, repeat, arguments, kwargs)
# Check on start up, then every configured interval
self.adopt_or_reset_orphaned_tasks()
call_regular_interval(
conf.getfloat('scheduler', 'orphaned_tasks_check_interval', fallback=300.0),
self.adopt_or_reset_orphaned_tasks,
)
call_regular_interval(
conf.getfloat('scheduler', 'pool_metrics_interval', fallback=5.0),
self._emit_pool_metrics,
)
call_regular_interval(
conf.getfloat('scheduler', 'clean_tis_without_dagrun_interval', fallback=15.0),
self._clean_tis_without_dagrun,
)
for loop_count in itertools.count(start=1):
with Stats.timer() as timer:
if self.using_sqlite:
self.processor_agent.run_single_parsing_loop()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug("Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
with create_session() as session:
num_queued_tis = self._do_scheduling(session)
self.executor.heartbeat()
session.expunge_all()
num_finished_events = self._process_executor_events(session=session)
self.processor_agent.heartbeat()
# Heartbeat the scheduler periodically
self.heartbeat(only_if_necessary=True)
# Run any pending timed events
next_event = timers.run(blocking=False)
self.log.debug("Next timed event is in %f", next_event)
self.log.debug("Ran scheduling loop in %.2f seconds", timer.duration)
if not is_unit_test and not num_queued_tis and not num_finished_events:
# If the scheduler is doing things, don't sleep. This means the scheduler runs "as quickly
# as possible" when there is work to do, but can sleep when idle, dropping CPU usage.
time.sleep(min(self._processor_poll_interval, next_event))
if loop_count >= self.num_runs > 0:
self.log.info(
"Exiting scheduler loop as requested number of runs (%d - got to %d) has been reached",
self.num_runs,
loop_count,
)
break
if self.processor_agent.done:
self.log.info(
"Exiting scheduler loop as requested DAG parse count (%d) has been reached after %d"
" scheduler loops",
self.num_times_parse_dags,
loop_count,
)
break
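# The timed events above are driven by the stdlib ``sched`` module. A minimal standalone
# sketch of the same pattern, assuming a hypothetical ``do_work`` callable:
#
#   import sched, time
#
#   timers = sched.scheduler()
#
#   def call_regular_interval(delay, action):
#       def repeat():
#           action()
#           timers.enter(delay, 1, repeat)          # re-arm after the action finishes
#       timers.enter(delay, 1, repeat)
#
#   call_regular_interval(5.0, do_work)
#   while True:
#       next_event = timers.run(blocking=False)     # run due events, return delay until the next one
#       time.sleep(min(1.0, next_event or 1.0))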
@provide_session
def _clean_tis_without_dagrun(self, session):
with prohibit_commit(session) as guard:
try:
self._change_state_for_tis_without_dagrun(
old_states=[State.UP_FOR_RETRY], new_state=State.FAILED, session=session
)
self._change_state_for_tis_without_dagrun(
old_states=[State.QUEUED, State.SCHEDULED, State.UP_FOR_RESCHEDULE, State.SENSING],
new_state=State.NONE,
session=session,
)
guard.commit()
except OperationalError as e:
if is_lock_not_available_error(error=e):
self.log.debug("Lock held by another Scheduler")
session.rollback()
else:
raise
guard.commit()
def _do_scheduling(self, session) -> int:
"""
This function is where the main scheduling decisions take place. It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time-consuming process, we select only 10 dags by default
(configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
By "next oldest", we mean hasn't been examined/scheduled in the most time.
The reason we don't select all dagruns at once because the rows are selected with row locks, meaning
that only one scheduler can "process them", even it it is waiting behind other dags. Increasing this
limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
(>500 tasks.) DAGs
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_execute_task_instances for more.
:return: Number of TIs enqueued in this iteration
:rtype: int
"""
# Put a check in place to make sure we don't commit unexpectedly
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
query = DagModel.dags_needing_dagruns(session)
self._create_dag_runs(query.all(), session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagruns
dag_runs = DagRun.next_dagruns_to_examine(session)
# Bulk fetch the currently active dag runs for the dags we are
# examining, rather than making one query per DagRun
# TODO: This query is probably horribly inefficient (though there is an
# index on (dag_id,state)). It is to deal with the case when a user
# clears more than max_active_runs older tasks -- we don't want the
# scheduler to suddenly go and start running tasks from all of the
# runs. (AIRFLOW-137/GH #1442)
#
# The longer term fix would be to have `clear` do this, and put DagRuns
# in to the queued state, then take DRs out of queued before creating
# any new ones
# Build up a set of execution_dates that are "active" for a given
# dag_id -- only tasks from those runs will be scheduled.
active_runs_by_dag_id = defaultdict(set)
query = (
session.query(
TI.dag_id,
TI.execution_date,
)
.filter(
TI.dag_id.in_(list({dag_run.dag_id for dag_run in dag_runs})),
TI.state.notin_(list(State.finished) + [State.REMOVED]),
)
.group_by(TI.dag_id, TI.execution_date)
)
for dag_id, execution_date in query:
active_runs_by_dag_id[dag_id].add(execution_date)
for dag_run in dag_runs:
self._schedule_dag_run(dag_run, active_runs_by_dag_id.get(dag_run.dag_id, set()), session)
guard.commit()
# Without this, the session has an invalid view of the DB
session.expunge_all()
# END: schedule TIs
try:
if self.executor.slots_available <= 0:
# We know we can't do anything here, so don't even try!
self.log.debug("Executor full, skipping critical section")
return 0
timer = Stats.timer('scheduler.critical_section_duration')
timer.start()
# Find any TIs in state SCHEDULED and try to QUEUE them (send them to the executor)
num_queued_tis = self._critical_section_execute_task_instances(session=session)
# Make sure we only send this metric if we obtained the lock; otherwise we'll skew the
# metric way down
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
if is_lock_not_available_error(error=e):
self.log.debug("Critical section lock held by another Scheduler")
Stats.incr('scheduler.critical_section_busy')
session.rollback()
return 0
raise
guard.commit()
return num_queued_tis
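# The "critical section" above relies on database row locks so that only one scheduler
# queues a given set of task instances at a time. A plain-SQLAlchemy sketch of the
# underlying SELECT ... FOR UPDATE SKIP LOCKED pattern (``max_tis`` is hypothetical):
#
#   tis = (
#       session.query(TI)
#       .filter(TI.state == State.SCHEDULED)
#       .with_for_update(skip_locked=True)   # rows locked by another scheduler are skipped
#       .limit(max_tis)
#       .all()
#   )
#
# The real code goes through ``with_row_locks``/``skip_locked`` so that behaviour can
# degrade gracefully on databases without SKIP LOCKED support.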
def _create_dag_runs(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
for dag_model in dag_models:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag_model.next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
)
self._update_dag_next_dagruns(dag_models, session)
# TODO[HA]: Should we do a session.flush() so we don't have to keep lots of state/object in
# memory for larger dags? or expunge_all()
def _update_dag_next_dagruns(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Bulk update the next_dagrun and next_dagrun_create_after for all the dags.
We batch the select queries to get info about all the dags at once
"""
# Check max_active_runs, to see if we are _now_ at the limit for any of
# these dags (we've just created a DagRun for them, after all)
active_runs_of_dags = dict(
session.query(DagRun.dag_id, func.count('*'))
.filter(
DagRun.dag_id.in_([o.dag_id for o in dag_models]),
DagRun.state == State.RUNNING, # pylint: disable=comparison-with-callable
DagRun.external_trigger.is_(False),
)
.group_by(DagRun.dag_id)
.all()
)
for dag_model in dag_models:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
active_runs_of_dag = active_runs_of_dags.get(dag.dag_id, 0)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id,
active_runs_of_dag,
dag.max_active_runs,
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = dag.next_dagrun_info(
dag_model.next_dagrun
)
def _schedule_dag_run(
self,
dag_run: DagRun,
currently_active_runs: Set[datetime.datetime],
session: Session,
) -> int:
"""
Make scheduling decisions about an individual dag run
``currently_active_runs`` is passed in so that a batch query can be
used to ask this for all dag runs in the batch, to avoid an n+1 query.
:param dag_run: The DagRun to schedule
:param currently_active_runs: Execution dates of currently active runs of this DAG
:return: Number of tasks scheduled
"""
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
if not dag:
self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
return 0
if (
dag_run.start_date
and dag.dagrun_timeout
and dag_run.start_date < timezone.utcnow() - dag.dagrun_timeout
):
dag_run.state = State.FAILED
dag_run.end_date = timezone.utcnow()
self.log.info("Run %s of %s has timed-out", dag_run.run_id, dag_run.dag_id)
session.flush()
# Work out whether we should allow creating a new DagRun now
self._update_dag_next_dagruns([session.query(DagModel).get(dag_run.dag_id)], session)
callback_to_execute = DagCallbackRequest(
full_filepath=dag.fileloc,
dag_id=dag.dag_id,
execution_date=dag_run.execution_date,
is_failure_callback=True,
msg='timed_out',
)
# Send SLA & DAG Success/Failure Callbacks to be executed
self._send_dag_callbacks_to_processor(dag_run, callback_to_execute)
return 0
if dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error("Execution date is in future: %s", dag_run.execution_date)
return 0
if dag.max_active_runs:
if (
len(currently_active_runs) >= dag.max_active_runs
and dag_run.execution_date not in currently_active_runs
):
self.log.info(
"DAG %s already has %d active runs, not queuing any tasks for run %s",
dag.dag_id,
len(currently_active_runs),
dag_run.execution_date,
)
return 0
self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
# TODO[HA]: Rename update_state -> schedule_dag_run, ?? something else?
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
self._send_dag_callbacks_to_processor(dag_run, callback_to_run)
# This will do one query per dag run. We "could" build up a complex
# query to update all the TIs across all the execution dates and dag
# IDs in a single query, but it turns out that can be _very very slow_
# see #11147/commit ee90807ac for more details
return dag_run.schedule_tis(schedulable_tis, session)
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
"""Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
def _send_dag_callbacks_to_processor(
self, dag_run: DagRun, callback: Optional[DagCallbackRequest] = None
):
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
dag = dag_run.get_dag()
self._send_sla_callbacks_to_processor(dag)
if callback:
self.processor_agent.send_callback_to_execute(callback)
def _send_sla_callbacks_to_processor(self, dag: DAG):
"""Sends SLA Callbacks to DagFileProcessor if tasks have SLAs set and check_slas=True"""
if not settings.CHECK_SLAS:
return
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.debug("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
self.processor_agent.send_sla_callback_request_to_execute(
full_filepath=dag.fileloc, dag_id=dag.dag_id
)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED]) # type: ignore
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING]) # type: ignore
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def adopt_or_reset_orphaned_tasks(self, session: Session = None):
"""
Adopt or reset any TaskInstance in SCHEDULED, QUEUED or RUNNING state that was
enqueued by a SchedulerJob that is no longer running.
:return: the number of TIs reset
:rtype: int
"""
self.log.info("Resetting orphaned tasks for active dag runs")
timeout = conf.getint('scheduler', 'scheduler_health_check_threshold')
num_failed = (
session.query(SchedulerJob)
.filter(
SchedulerJob.state == State.RUNNING,
SchedulerJob.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout)),
)
.update({"state": State.FAILED})
)
if num_failed:
self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
Stats.incr(self.__class__.__name__.lower() + '_end', num_failed)
resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
query = (
session.query(TI)
.filter(TI.state.in_(resettable_states))
# outerjoin is because we didn't use to have queued_by_job_id
# set, so we need to pick up anything pre-upgrade. This (and the
# "or queued_by_job_id IS NULL") can go as soon as scheduler HA is
# released.
.outerjoin(TI.queued_by_job)
.filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
.join(TI.dag_run)
.filter(
DagRun.run_type != DagRunType.BACKFILL_JOB,
# pylint: disable=comparison-with-callable
DagRun.state == State.RUNNING,
)
.options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
)
# Lock these rows, so that another scheduler can't try and adopt these too
tis_to_reset_or_adopt = with_row_locks(query, of=TI, **skip_locked(session=session)).all()
to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)
reset_tis_message = []
for ti in to_reset:
reset_tis_message.append(repr(ti))
ti.state = State.NONE
ti.queued_by_job_id = None
for ti in set(tis_to_reset_or_adopt) - set(to_reset):
ti.queued_by_job_id = self.id
Stats.incr('scheduler.orphaned_tasks.cleared', len(to_reset))
Stats.incr('scheduler.orphaned_tasks.adopted', len(tis_to_reset_or_adopt) - len(to_reset))
if to_reset:
task_instance_str = '\n\t'.join(reset_tis_message)
self.log.info(
"Reset the following %s orphaned TaskInstances:\n\t%s", len(to_reset), task_instance_str
)
# Issue SQL/finish "Unit of Work", but let @provide_session commit (or if passed a session, let caller
# decide when to commit
session.flush()
return len(to_reset)
|
network.py
|
import logging
from binascii import crc32
from enum import IntEnum
from paramiko import Channel
from queue import Empty, Queue
from selectors import SelectSelector, EVENT_WRITE
from socket import socket, error as SocketError
from struct import pack, unpack
from threading import Event, Thread
from time import sleep
from typing import Any, NamedTuple, Set, Tuple, Union
from .config import TIMEOUT, LEN_HEAD
Connection = Union[socket, Channel]
class Flag(IntEnum):
PUSH = 1  # push request
PULL = 2  # pull request
SID = 3  # establish a session
ATTACH = 4  # attach a follow-up connection
MONOFILE = 5  # transfer mode (single file or not)
DIR_INFO = 6  # directory info
FILE_INFO = 7  # file info
FILE_COUNT = 8  # file count
FILE_READY = 9  # file is ready
FILE_CHUNK = 10  # data chunk transfer
DONE = 11  # done
EXCEPTION = 12  # abnormal exit
@classmethod
def contains(cls, member: object) -> bool:
return member in cls.__members__.values()
class Packet(NamedTuple):
flag: Flag
body: bytes
def __str__(self) -> str:
return (f'Packet: {self.flag.name} '
f'len={self.length} '
f'chk={self.chksum:08x}')
@property
def length(self) -> int:
return len(self.body)
@property
def chksum(self) -> int:
return crc32(self.body)
@staticmethod
def load(flag: Flag, *args) -> 'Packet':
'''Build a Packet from the given flag and arguments'''
if flag == Flag.PULL or flag == Flag.PUSH:
if isinstance(args[0], bytes):
body = args[0]
else:
body = str(args[0]).encode('utf8')
elif flag == Flag.SID or flag == Flag.ATTACH:
body = pack('>16s', *args)
elif flag == Flag.MONOFILE:
body = pack('>?', *args)
elif flag == Flag.DIR_INFO:
length = len(args[-1])
body = pack(f'>IH{length}s', *args)
elif flag == Flag.FILE_INFO:
length = len(args[-1])
body = pack(f'>IHQd16s{length}s', *args)
elif flag == Flag.FILE_COUNT:
body = pack('>I', *args)
elif flag == Flag.FILE_READY:
body = pack('>I', *args)
elif flag == Flag.FILE_CHUNK:
length = len(args[-1])
body = pack(f'>2I{length}s', *args)
elif flag == Flag.DONE:
body = pack('>?', True)
elif flag == Flag.EXCEPTION:
body = str(args[0]).encode('utf8')
else:
raise ValueError(f'{flag} is not a valid Flag')
return Packet(flag, body)
def pack(self) -> bytes:
'''Serialize the packet (head + body) into bytes'''
fmt = f'>BIH{self.length}s'
return pack(fmt, self.flag, self.chksum, self.length, self.body)
@staticmethod
def unpack_head(head: bytes) -> Tuple[Flag, int, int]:
'''Parse the packet head'''
flag, chksum, length = unpack('>BIH', head)
if not Flag.contains(flag):
raise PacketError
else:
return Flag(flag), chksum, length
def unpack_body(self) -> Tuple[Any, ...]:
'''Unpack the body according to the flag'''
if self.flag == Flag.PULL or self.flag == Flag.PUSH:
return (self.body.decode('utf-8'),) # dest path
elif self.flag == Flag.SID or self.flag == Flag.ATTACH:
return unpack('>16s', self.body) # Worker ID
elif self.flag == Flag.MONOFILE:
return unpack('>?', self.body) # is monofile
elif self.flag == Flag.DIR_INFO:
# file_id | perm | path
# 4B | 2B | ...
fmt = f'>IH{self.length - 6}s'
return unpack(fmt, self.body)
elif self.flag == Flag.FILE_INFO:
# file_id | perm | size | mtime | chksum | path
# 4B | 2B | 8B | 8B | 16B | ...
fmt = f'>IHQd16s{self.length - 38}s'
return unpack(fmt, self.body)
elif self.flag == Flag.FILE_COUNT:
return unpack('>I', self.body) # file count
elif self.flag == Flag.FILE_READY:
return unpack('>I', self.body) # file id
elif self.flag == Flag.FILE_CHUNK:
# file_id | seq | chunk
# 4B | 4B | ...
fmt = f'>2I{self.length - 8}s'
return unpack(fmt, self.body)
elif self.flag == Flag.DONE:
return unpack('>?', self.body)
elif self.flag == Flag.EXCEPTION:
return (self.body.decode('utf-8'),)
else:
raise ValueError(f'{self.flag} is not a valid Flag')
def is_valid(self, chksum: int):
'''Check whether the body matches the given checksum'''
return self.chksum == chksum
class PacketError(Exception):
pass
def send_pkt(conn: Connection, packet: Packet):
'''Send a packet over the connection'''
datagram = packet.pack()
conn.sendall(datagram)
def recv_all(conn: Connection, length: int) -> bytes:
'''Receive exactly `length` bytes of data'''
datagram = bytearray()
while length > 0:
_data = conn.recv(length)
n_recv = len(_data)
if n_recv > 0:
length -= n_recv
datagram += _data
else:
raise ConnectionResetError
return bytes(datagram)
def recv_pkt(conn: Connection) -> Packet:
'''Receive a packet from the connection'''
# receive and parse the head
head = recv_all(conn, LEN_HEAD)
flag, chksum, len_body = Packet.unpack_head(head)
# receive the body
body = recv_all(conn, len_body)
if crc32(body) == chksum:
return Packet(flag, body)
else:
raise PacketError
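# Wire format recap: each datagram is a fixed '>BIH' head (flag, crc32 checksum, body
# length) followed by the body, so LEN_HEAD is expected to be 7 bytes. A minimal
# round-trip sketch over a local socket pair, assuming only this module and the stdlib:
#
#   import socket
#   a, b = socket.socketpair()
#   send_pkt(a, Packet.load(Flag.FILE_CHUNK, 1, 0, b'hello'))   # file_id=1, seq=0, chunk
#   pkt = recv_pkt(b)
#   assert pkt.flag is Flag.FILE_CHUNK
#   assert pkt.unpack_body() == (1, 0, b'hello')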
class Counter:
def __init__(self):
self.n_sent = 0
def acc(self, length):
self.n_sent += length
class ConnectionPool(Thread):
_max_size = 128
def __init__(self, size=16):
super().__init__(daemon=True)
self.size = min(size, self._max_size)
self.send_q = Queue(self.size)
self.recv_q = Queue()
self.done = Event()
self.sender = SelectSelector()
self.connections: Set[Connection] = set()
def send(self, packet: Packet):
self.send_q.put(packet)
def recv(self, timeout=TIMEOUT) -> Packet:
return self.recv_q.get(timeout=timeout)
def add(self, conn: Connection):
'''Add a connection to the pool'''
# check whether the number of connections has reached the limit
if len(self.connections) >= self._max_size:
return False
# check whether it has already been added
if conn in self.connections:
return True
self.connections.add(conn)
self.sender.register(conn, EVENT_WRITE, data=Counter())
t_recv = Thread(target=self.listen_to_recv, args=(conn,), daemon=True)
t_recv.start()
return True
def pop(self, conn: Connection):
try:
self.sender.unregister(conn)
except (KeyError, ValueError):
pass
try:
self.connections.remove(conn)
except KeyError:
pass
finally:
conn.close()
def listen_to_send(self):
while not self.done.is_set():
# find the conn that sent the least data
keys = [key for key, _ in self.sender.select(timeout=1)]
if not keys:
continue
else:
key = min(keys, key=lambda k: k.data.n_sent)
# get data
try:
packet: Packet = self.send_q.get(timeout=TIMEOUT)
conn: Connection = key.fileobj
except Empty:
break
# send
try:
send_pkt(conn, packet)
key.data.acc(packet.length)
except SocketError as e:
self.pop(conn)
logging.warning(f'[Send] Conn-{id(conn):x}: {e}.')
def listen_to_recv(self, conn: Connection):
conn_name = f'{id(conn):x}'
while not self.done.is_set():
try:
packet = recv_pkt(conn)
self.recv_q.put(packet)
logging.debug(f'[Recv] conn-{conn_name}: {packet}')
except ConnectionResetError:
self.pop(conn)
return
except SocketError as e:
self.pop(conn)
logging.warning(f'[Recv] Conn-{conn_name}: {e}.')
except PacketError:
self.pop(conn)
logging.error(f'conn-{conn_name} received an error packet.')
return
def stop(self):
while not self.send_q.empty():
sleep(0.2)
self.done.set()
self.sender.close()
for conn in self.connections.copy():
conn.close()
def run(self):
if not self.connections:
raise ValueError('No connection')
self.done.clear()
self.listen_to_send()
self.stop()
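# A rough usage sketch of ConnectionPool, assuming ``conns`` is a hypothetical iterable
# of already-connected sockets or paramiko Channels:
#
#   pool = ConnectionPool(size=16)
#   for conn in conns:
#       pool.add(conn)               # registers the conn for sending and spawns a recv thread
#   pool.start()                     # Thread.run() drives listen_to_send()
#   pool.send(Packet.load(Flag.DONE, True))
#   reply = pool.recv(timeout=30)    # raises queue.Empty if nothing arrives in time
#   pool.stop()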
|
sstvProxy.py
|
#!/usr/bin/env python3
###
###Copyright (c) 2016 by Joel Kaaberg and contributors. See AUTHORS
###for more details.
###
###Some rights reserved.
###
###Redistribution and use in source and binary forms of the software as well
###as documentation, with or without modification, are permitted provided
###that the following conditions are met:
###
###* Redistributions of source code must retain the above copyright
### notice, this list of conditions and the following disclaimer.
###
###* Redistributions in binary form must reproduce the above
### copyright notice, this list of conditions and the following
### disclaimer in the documentation and/or other materials provided
### with the distribution.
###
###* The names of the contributors may not be used to endorse or
### promote products derived from this software without specific
### prior written permission.
###
###THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
###CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
###NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
###A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
###OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
###EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
###PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
###PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
###LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
###NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
###SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
###DAMAGE.
###
import logging, os, sys, time, argparse, json, gzip, base64, platform, threading, subprocess, urllib, glob, sqlite3, \
array, socket, struct, ntpath, timeit, re
from datetime import datetime, timedelta
from json import load, dump
from logging.handlers import RotatingFileHandler
from xml.etree import ElementTree as ET
from socket import timeout
from io import StringIO
from xml.sax.saxutils import escape
import requests
import datetime as dt
import xml.sax.saxutils as saxutils
HEADLESS = False
try:
from urlparse import urljoin
import thread
except ImportError:
from urllib.parse import urljoin
import _thread
from flask import Flask, redirect, abort, request, Response, send_from_directory, jsonify, render_template, \
stream_with_context, url_for
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action='store_true', help="Console Debugging Enable")
parser.add_argument("-hl", "--headless", action='store_true', help="Force Headless mode")
parser.add_argument("-t", "--tvh", action='store_true', help="Force TVH scanning mode")
parser.add_argument("-i", "--install", action='store_true', help="Force install again")
args = parser.parse_args()
try:
import tkinter
except ImportError:
HEADLESS = True
if args.headless or 'headless' in sys.argv:
HEADLESS = True
app = Flask(__name__, static_url_path='')
__version__ = 1.837
# Changelog
# 1.837 - Web work
# 1.836 - Addition of sports.m3u8 which includes groups for current sports
# 1.8354 - Removal of trailing '==' from URLs.
# 1.8353 - Fallback fix
# 1.8352 - Small refactor for sstvLauncher
# 1.8351 - XML tag fix, xml path fix
# 1.835 - Emby fix for categories
# 1.834 - Dynamic mpegts added
# 1.833 - EPG category tinkering
# 1.832 - Escape characters for Emby EPG scanning
# 1.831 - Added translation of plex dvr transcode settings to SSTV quality settings.
# 1.83 - Rewrote create url to use chan api, added strm arg to playlist.m3u8 and static.m3u8 and dynamic playlists can be called using streamtype.m3u8 ie /sstv/hls.m3u8
# 1.8251 - Make pipe an option still and other small fixes
# 1.825 - Added support for enigma by adding in blank subtitle and desc fields to the EPG
# 1.8241 - Added user agent to log, Added new servers
# 1.824 - Backup server prompt added for headless
# 1.823 - Added -i for install trigger
# 1.822 - Added Auto server selection to Gui.
# 1.821 - Added CHECK_CHANNEL to adv settings
# 1.82 - Advanced settings added to web page, channel scanning work
# 1.815 - Restart option fix
# 1.814 - EPG Hotfix
# 1.813 - EPG Hotfix
# 1.812 - FixUrl Fix, readded EPG override (was inadvertently removed in a commit revert), change of epg refresh to 4hrs
# 1.811 - Dev disable
# 1.81 - Improvement to Series Category detection.
# 1.8 - Added .gz support for EXTRA XML file/url.
# 1.731 - Correction of channel return type that had been removed
# 1.73 - HTML write exception fixed for settings page, Vaders update
# 1.72 - Auto server selection based off of ping
# 1.71 - Channel parsing catch added for missing channels
# 1.7 - Static and dynamic xspf options added ip:port/sstv/static.xspf or ip:port/sstv/playlist.xspf, changed tkinter menu
# 1.691 - Updated FOG Urls
# 1.69 - Added more info to website, removed network discovery(isn't useful).
# 1.68 - Updated for MyStreams changes
# 1.672 - Changed mpegts output default quality from 1 to what user has set.
# 1.671 - Correction of MMATV url
# 1.67 - Finished JSON to XML, fixed quality setting and settings menu form posting
# 1.66 - Added extra m3u8 to the standard Plex Live output, make sure to use combined.xml in this scenario instead too.
# 1.65 - Addition of strmtype 'mpegts' utilises ffmpeg pipe prev used only by TVH/Plex Live. Enhancement of Webpage incl update and restart buttons.
# 1.64 - Bugfixes
# 1.63 - Added catch for clients with no user agent at all
# 1.62 - xmltv merger bugfix and speedup, kodi settings overwrite disabled
# 1.61 - Addition of test.m3u8 to help identify client requirements
# 1.60 - Addition of XMLTV merger /combined.xml, TVH CHNUM addition, Addition of MMA tv auth, change of returns based on detected client
# 1.59 - Removed need for TVH redirect, added a new path IP:PORT/tvh can be used in plex instead!
# 1.58 - A single dynamic channel can be requested with /ch##.m3u8 strm/qual options are still optional is /ch1.m3u8?strm=rtmp&qual=2
# 1.57 - Index.html enhancements
# 1.56 - Addition of TVH proxy core role to this proxy, will disable SSTV to plex live though
# 1.55 - Addition of Static m3u8
# 1.54 - Adjustment to kodi dynamic url links and fix to external hls usage.
# 1.53 - Sports only epg available at /sports.xml
# 1.52 - Addition of External Port
# 1.51 - Inclusion of an m3u8 merger to add another m3u8 files contents to the end of the kodi.m3u8 playlist result is called combined.m3u8 refer advanced settings.
# 1.50 - GUI Redesign
# 1.47 - TVH scanning fixed.
# 1.46 - Removed startup gui from mac and linux exes, fixed linux url
# 1.45 - Added restart required message, Change of piping checks, manual trigger now for easy mux detection (forcing channel 1), use 'python sstvproxy install'
# 1.44 - Change of initial launch to use the gui, if not desired launch with 'python sstvproxy.py headless'. Added adv settings parsing see advancedsettings.json for example
# 1.43 - Bugfix settings menu
# 1.42 - External Playlist added, version check and download added
# 1.41 - Bug fix and switch put on network discovery
# 1.40 - Settings menu added to /index.html
# 1.37 - Network Discovery fixed hopefully
# 1.36 - Two path bug fixes
# 1.35 - Mac addon path fix and check
# 1.34 - Fixed Plex Discovery, TVH file creation fix and addition of writing of genres and template files
# 1.33 - Typo
# 1.32 - Change server name dots to hyphens.
# 1.31 - Tidying
# 1.3 - EPG - Changed zap2it references to the channel number for better readability in clients that use that field as the channel name. As a result the epgs from both sources share the same convention. Playlist generators adjusted to suit.
# 1.2 - TVH Completion and install
# 1.1 - Refactoring and TVH inclusion
# 1.0 - Initial post testing release
############################################################
# Logging
############################################################
# Setup logging
log_formatter = logging.Formatter(
'%(asctime)s - %(levelname)-10s - %(name)-10s - %(funcName)-25s- %(message)s')
logger = logging.getLogger('SmoothStreamsProxy v' + str(__version__))
logger.setLevel(logging.DEBUG)
logging.getLogger('werkzeug').setLevel(logging.ERROR)
# Console logging
console_handler = logging.StreamHandler()
if args.debug:
console_handler.setLevel(logging.DEBUG)
else:
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(log_formatter)
logger.addHandler(console_handler)
# Rotating Log Files
if not os.path.isdir(os.path.join(os.path.dirname(sys.argv[0]), 'cache')):
os.mkdir(os.path.join(os.path.dirname(sys.argv[0]), 'cache'))
file_handler = RotatingFileHandler(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'status.log'),
maxBytes=1024 * 1024 * 2,
backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
USERAGENT = 'YAP - %s - %s - %s' % (sys.argv[0], platform.system(), str(__version__))
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', USERAGENT)]
urllib.request.install_opener(opener)
type = ""
latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/master/sstvProxy.py"
if not sys.argv[0].endswith('.py'):
if platform.system() == 'Linux':
type = "Linux/"
latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/master/Linux/sstvProxy"
elif platform.system() == 'Windows':
type = "Windows/"
latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/master/Windows/sstvproxy.exe"
elif platform.system() == 'Darwin':
type = "Macintosh/"
latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/master/Macintosh/sstvproxy"
url = "https://raw.githubusercontent.com/vorghahn/sstvProxy/master/%sversion.txt" % type
try:
latest_ver = float(json.loads(urllib.request.urlopen(url).read().decode('utf-8'))['Version'])
except Exception:
latest_ver = float(0.0)
logger.info("Latest version check failed, check internet.")
token = {
'hash': '',
'expires': ''
}
playlist = ""
class channelinfo:
epg = ""
description = ""
channum = 0
channame = ""
class programinfo:
description = ""
channel = 0
channelname = ""
height = 0
startTime = 0
endTime = 0
timeRange = ""
_title = ""
_category = ""
_quality = ""
_language = ""
def get_title(self):
if len(self._title) == 0:
return ("none " + self.timeRange).strip()
else:
return (self._title + " " + self.quality + " " + self.timeRange).replace("  ", " ").strip()
def set_title(self, title):
self._title = title
if len(self._category) == 0 or self._category == "TVShows":
if title.startswith("NHL") or "hockey" in title.lower():
self._category = "Ice Hockey"
elif title.startswith("UEFA") or title.startswith("EPL") or title.startswith(
"Premier League") or title.startswith("La Liga") or title.startswith(
"Bundesliga") or title.startswith(
"Serie A") or "soccer" in title.lower():
self._category = "World Football"
elif title.startswith("MLB") or "baseball" in title.lower():
self._category = "Baseball"
elif title.startswith("MMA") or title.startswith("UFC") or "boxing" in title.lower():
self._category = "Boxing + MMA"
elif title.startswith("NCAAF") or title.startswith("CFB"):
self._category = "NCAAF"
elif title.startswith("ATP") or "tennis" in title.lower():
self._category = "Tennis"
elif title.startswith("WWE"):
self._category = "Wrestling"
elif title.startswith("NFL") or title.startswith("NBA"):
self._category = title.split(" ")[0].replace(":", "").strip()
elif 'nba' in title.lower() or 'nbl' in title.lower() or 'ncaam' in title.lower() or 'basketball' in title.lower():
self._category = "Basketball"
elif 'nfl' in title.lower() or 'football' in title.lower() or 'american football' in title.lower() or 'ncaaf' in title.lower() or 'cfb' in title.lower():
self._category = "Football"
elif 'EPL' in title or 'efl' in title.lower() or 'soccer' in title.lower() or 'ucl' in title.lower() or 'mls' in title.lower() or 'uefa' in title.lower() or 'fifa' in title.lower() or 'fc' in title.lower() or 'la liga' in title.lower() or 'serie a' in title.lower() or 'wcq' in title.lower():
self._category = "Soccer"
elif 'rugby' in title.lower() or 'nrl' in title.lower() or 'afl' in title.lower():
self._category = "Rugby"
elif 'cricket' in title.lower() or 't20' in title.lower():
self._category = "Cricket"
elif 'tennis' in title.lower() or 'squash' in title.lower() or 'atp' in title.lower():
self._category = "Tennis/Squash"
elif 'f1' in title.lower() or 'nascar' in title.lower() or 'motogp' in title.lower() or 'racing' in title.lower():
self._category = "Motor Sport"
elif 'golf' in title.lower() or 'pga' in title.lower():
self._category = "Golf"
elif 'boxing' in title.lower() or 'mma' in title.lower() or 'ufc' in title.lower() or 'wrestling' in title.lower() or 'wwe' in title.lower():
self._category = "Martial Sports"
elif 'hockey' in title.lower() or 'nhl' in title.lower() or 'ice hockey' in title.lower():
self._category = "Ice Hockey"
elif 'baseball' in title.lower() or 'mlb' in title.lower() or 'beisbol' in title.lower() or 'minor league' in title.lower():
self._category = "Baseball"
elif 'news' in title.lower():
self._category = "News"
title = property(get_title, set_title)
def get_category(self):
if (len(self._category) == 0 or self._category == "none") and (
self.title.lower().find("news") or self.description.lower().find("news")) > -1:
return "News"
else:
return self._category
def set_category(self, category):
if category == "tv":
self._category = ""
else:
self._category = category
category = property(get_category, set_category)
def get_language(self):
return self._language
def set_language(self, language):
if language.upper() == "US" or language.upper() == "EN":
self._language = ""
else:
self._language = language.upper()
language = property(get_language, set_language)
def get_quality(self):
return self._quality
def set_quality(self, quality):
if quality.endswith("x1080"):
self._quality = "1080i"
self.height = 1080
elif quality.endswith("x720") or quality.lower() == "720p":
self._quality = "720p"
self.height = 720
elif quality.endswith("x540") or quality.lower() == "hqlq":
self._quality = "540p"
self.height = 540
elif quality.find("x") > 2:
self._quality = quality
self.height = int(quality.split("x")[1])
else:
self._quality = quality
self.height = 0
quality = property(get_quality, set_quality)
def get_album(self):
if self._quality.upper() == "HQLQ" and self.channelname.upper().find(" 720P") > -1:
self._quality = "720p"
return (self._category + " " + self.quality + " " + self._language).strip().replace("  ", " ")
album = property(get_album)
class EST5EDT(dt.tzinfo):
def utcoffset(self, dt):
return timedelta(hours=-5) + self.dst(dt)
def utc_seconds(self):
return self.utcoffset(datetime.now()).total_seconds()
def dst(self, dt):
d = datetime(dt.year, 3, 8) # 2nd Sunday in March
self.dston = d + timedelta(days=6 - d.weekday())
d = datetime(dt.year, 11, 1) # 1st Sunday in Nov
self.dstoff = d + timedelta(days=6 - d.weekday())
if self.dston <= dt.replace(tzinfo=None) < self.dstoff:
return timedelta(hours=1)
else:
return timedelta(0)
def tzname(self, dt):
return 'EST5EDT'
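# A small usage sketch of EST5EDT: it can be passed anywhere a tzinfo is accepted,
# e.g. to obtain the current US Eastern time or the UTC offset in seconds:
#
#   now_eastern = datetime.now(EST5EDT())
#   offset_secs = EST5EDT().utc_seconds()   # -18000 during EST, -14400 during EDT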
############################################################
# CONFIG
############################################################
# These are just defaults, place your settings in a file called proxysettings.json in the same directory
USER = ""
PASS = ""
SITE = "viewstvn"
SRVR = "dnaw1"
SRVR_SPARE = "dnaw1"
AUTO_SERVER = False
CHECK_CHANNEL = True
STRM = "hls"
QUAL = "1"
QUALLIMIT = 70
LISTEN_IP = "127.0.0.1"
LISTEN_PORT = 6969
SERVER_HOST = "http://" + LISTEN_IP + ":" + str(LISTEN_PORT)
SERVER_PATH = "sstv"
KODIPORT = 8080
EXTIP = "127.0.0.1"
EXTPORT = 80
EXT_HOST = "http://" + EXTIP + ":" + str(EXTPORT)
KODIUSER = "kodi"
KODIPASS = ""
EXTM3URL = ''
EXTM3UNAME = ''
EXTM3UFILE = ''
EXTXMLURL = ''
TVHREDIRECT = False
TVHURL = '127.0.0.1'
TVHUSER = ''
TVHPASS = ''
OVRXML = ''
tvhWeight = 300 # subscription priority
tvhstreamProfile = 'pass' # specify a stream profile that you want to use for adhoc transcoding in tvh, e.g. mp4
GUIDELOOKAHEAD = 5 # minutes
PIPE = False
CHANAPI = None
FALLBACK = False
# LINUX/WINDOWS
if platform.system() == 'Linux':
FFMPEGLOC = '/usr/bin/ffmpeg'
if os.path.isdir(os.path.join(os.path.expanduser("~"), '.kodi', 'userdata', 'addon_data', 'pvr.iptvsimple')):
ADDONPATH = os.path.join(os.path.expanduser("~"), '.kodi', 'userdata', 'addon_data', 'pvr.iptvsimple')
else:
ADDONPATH = False
elif platform.system() == 'Windows':
FFMPEGLOC = os.path.join('C:\\FFMPEG', 'bin', 'ffmpeg.exe')
if os.path.isdir(os.path.join(os.path.expanduser("~"), 'AppData', 'Roaming', 'Kodi', 'userdata', 'addon_data',
'pvr.iptvsimple')):
ADDONPATH = os.path.join(os.path.expanduser("~"), 'AppData', 'Roaming', 'Kodi', 'userdata', 'addon_data',
'pvr.iptvsimple')
else:
ADDONPATH = False
elif platform.system() == 'Darwin':
FFMPEGLOC = '/usr/local/bin/ffmpeg'
if os.path.isdir(
os.path.join(os.path.expanduser("~"), "Library", "Application Support", 'Kodi', 'userdata', 'addon_data',
'pvr.iptvsimple')):
ADDONPATH = os.path.join(os.path.expanduser("~"), "Library", "Application Support", 'Kodi', 'userdata',
'addon_data', 'pvr.iptvsimple')
else:
ADDONPATH = False
else:
print("Unknown OS detected... proxy may not function correctly")
############################################################
# INIT
############################################################
serverList = [
[' EU-Mix', 'deu'],
[' DE-Frankfurt', 'deu-de'],
[' FR-Paris', 'deu-fr1'],
[' NL-Mix', 'deu-nl'],
[' NL-1', 'deu-nl1'],
[' NL-2', 'deu-nl2'],
[' NL-3 Ams', 'deu-nl3'],
[' NL-4 Breda', 'deu-nl4'],
[' NL-5 Enschede', 'deu-nl5'],
[' UK-Mix', 'deu-uk'],
[' UK-London1', 'deu-uk1'],
[' UK-London2', 'deu-uk2'],
[' US-Mix', 'dna'],
[' East-Mix', 'dnae'],
[' West-Mix', 'dnaw'],
[' East-NJ', 'dnae1'],
[' East-VA', 'dnae2'],
# [' East-Mtl', 'dnae3'],
# [' East-Tor', 'dnae4'],
[' East-ATL', 'dnae5'],
[' East-NY', 'dnae6'],
[' West-Phx', 'dnaw1'],
[' West-LA', 'dnaw2'],
[' West-SJ', 'dnaw3'],
[' West-Chi', 'dnaw4'],
['Asia', 'dap'],
[' Asia1', 'dap1'],
[' Asia2', 'dap2'],
[' Asia3', 'dap3'],
[' Asia-Old', 'dsg']
]
vaders_channels = {"1": "2499", "2": "2500", "3": "2501", "4": "2502", "5": "2503", "6": "2504", "7": "2505",
"8": "2506", "9": "2507", "10": "2508", "11": "2509", "12": "2510", "13": "2511", "14": "2512",
"15": "2513", "16": "2514", "17": "2515", "18": "2516", "19": "2517", "20": "2518", "21": "2519",
"22": "2520", "23": "2521", "24": "2522", "25": "2523", "26": "2524", "27": "2525", "28": "2526",
"29": "2527", "30": "2528", "31": "2529", "32": "2530", "33": "2531", "34": "2532", "35": "2533",
"36": "2534", "37": "2535", "38": "2536", "39": "2537", "40": "2538", "41": "2541", "42": "2542",
"43": "2543", "44": "2544", "45": "2545", "46": "2546", "47": "2547", "48": "2548", "49": "2549",
"50": "2550", "51": "2551", "52": "2552", "53": "2553", "54": "2554", "55": "2555", "56": "2556",
"57": "2557", "58": "2606", "59": "2607", "60": "2608", "61": "2609", "62": "2610", "63": "2611",
"64": "2612", "65": "2613", "66": "2614", "67": "2615", "68": "2616", "69": "2617", "70": "2618",
"71": "2619", "72": "2620", "73": "2622", "74": "2621", "75": "2623", "76": "2624", "77": "2625",
"78": "2626", "79": "2627", "80": "2628", "81": "2629", "82": "2630", "83": "2631", "84": "2632",
"85": "2633", "86": "2634", "87": "2635", "88": "2636", "89": "2637", "90": "2638", "91": "2639",
"92": "2640", "93": "2641", "94": "2642", "95": "2643", "96": "2644", "97": "2645", "98": "2646",
"99": "2647", "100": "2648", "101": "2649", "102": "2650", "103": "2651", "104": "2652",
"105": "2653", "106": "2654", "107": "2655", "108": "2656", "109": "2657", "110": "2658",
"111": "2659", "112": "2660", "113": "2661", "114": "2662", "115": "2663", "116": "2664",
"117": "2665", "118": "2666", "119": "2667", "120": "2668", "121": "47381", "122": "2679",
"123": "2680", "124": "2681", "125": "2682", "126": "47376", "127": "47377", "128": "47378",
"129": "47379", "130": "47380", "131": "47718", "132": "47719", "133": "49217", "134": "50314",
"135": "50315", "136": "50319", "137": "50320", "138": "50321", "139": "50322", "141": "49215",
"140": "50394", "142": "49216", "143": "50395", "144": "50396", "145": "50397", "146": "50398",
"147": "50399", "148": "47707", "149": "47670", "150": "47716"}
providerList = [
['Live247', 'view247'],
['Mystreams', 'vaders'],
['StarStreams', 'viewss'],
['StreamTVnow', 'viewstvn'],
['MMA-TV/MyShout', 'viewmmasr']
]
streamtype = ['hls', 'hlsa', 'rtmp', 'mpegts', 'rtsp', 'dash', 'wss']
qualityList = [
['HD', '1'],
['HQ', '2'],
['LQ', '3']
]
STREAM_INFO = {'hls': {'domain': 'https', 'port': '443', 'playlist': '.stream/playlist.m3u8', 'quality': 'standard'},
'hlsa': {'domain': 'https', 'port': '443', 'playlist': '/playlist.m3u8', 'quality': '.smil'},
'rtmp': {'domain': 'rtmp', 'port': '3625', 'playlist': '.stream', 'quality': 'standard'},
'mpegts': {'domain': 'https', 'port': '443', 'playlist': '.stream/mpeg.2ts', 'quality': 'standard'},
'rtsp': {'domain': 'rtsp', 'port': '2935', 'playlist': '.stream', 'quality': 'standard'},
'dash': {'domain': 'https', 'port': '443', 'playlist': '/manifest.mpd', 'quality': '.smil'},
'wss': {'domain': 'wss', 'port': '443', 'playlist': '.stream', 'quality': 'standard'},
'wssa': {'domain': 'wss', 'port': '443', 'playlist': '', 'quality': '.smil'}
}
def adv_settings():
if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'advancedsettings.json')):
logger.debug("Parsing advanced settings")
with open(os.path.join(os.path.dirname(sys.argv[0]), 'advancedsettings.json')) as advset:
advconfig = load(advset)
if "kodiuser" in advconfig:
logger.debug("Overriding kodi username")
global KODIUSER
KODIUSER = advconfig["kodiuser"]
if "kodipass" in advconfig:
logger.debug("Overriding kodi password")
global KODIPASS
KODIPASS = advconfig["kodipass"]
if "ffmpegloc" in advconfig:
logger.debug("Overriding ffmpeg location")
global FFMPEGLOC
FFMPEGLOC = advconfig["ffmpegloc"]
if "kodiport" in advconfig:
logger.debug("Overriding kodi port")
global KODIPORT
KODIPORT = advconfig["kodiport"]
if "extram3u8url" in advconfig:
logger.debug("Overriding EXTM3URL")
global EXTM3URL
EXTM3URL = advconfig["extram3u8url"]
if "extram3u8name" in advconfig:
logger.debug("Overriding EXTM3UNAME")
global EXTM3UNAME
EXTM3UNAME = advconfig["extram3u8name"]
if "extram3u8file" in advconfig:
logger.debug("Overriding EXTM3UFILE")
global EXTM3UFILE
EXTM3UFILE = advconfig["extram3u8file"]
if "extraxmlurl" in advconfig:
logger.debug("Overriding EXTXMLURL")
global EXTXMLURL
EXTXMLURL = advconfig["extraxmlurl"]
if "tvhredirect" in advconfig:
logger.debug("Overriding tvhredirect")
global TVHREDIRECT
TVHREDIRECT = advconfig["tvhredirect"]
if "tvhaddress" in advconfig:
logger.debug("Overriding tvhaddress")
global TVHURL
TVHURL = advconfig["tvhaddress"]
if "tvhuser" in advconfig:
logger.debug("Overriding tvhuser")
global TVHUSER
TVHUSER = advconfig["tvhuser"]
if "tvhpass" in advconfig:
logger.debug("Overriding tvhpass")
global TVHPASS
TVHPASS = advconfig["tvhpass"]
if "overridexml" in advconfig:
logger.debug("Overriding XML")
global OVRXML
OVRXML = advconfig["overridexml"]
if "checkchannel" in advconfig:
logger.debug("Overriding CheckChannel")
global CHECK_CHANNEL
CHECK_CHANNEL = advconfig["checkchannel"] == "True"
if "pipe" in advconfig:
logger.debug("Overriding Pipe")
global PIPE
PIPE = advconfig["pipe"] == "True"
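# adv_settings() reads an optional advancedsettings.json next to the script; only the keys
# checked above are honoured. A hypothetical example overriding a few of them:
#
#   {
#     "ffmpegloc": "/usr/local/bin/ffmpeg",
#     "kodiuser": "kodi",
#     "kodipass": "secret",
#     "extraxmlurl": "http://example.com/extra.xml",
#     "checkchannel": "False",
#     "pipe": "True"
#   }
#
# Note that "checkchannel" and "pipe" are compared against the strings "True"/"False",
# not JSON booleans.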
def load_settings():
global QUAL, QUALLIMIT, USER, PASS, SRVR, SRVR_SPARE, SITE, STRM, KODIPORT, LISTEN_IP, LISTEN_PORT, SERVER_HOST, EXTIP, EXT_HOST, EXTPORT
if not os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'proxysettings.json')):
logger.debug("No config file found.")
try:
logger.debug("Parsing settings")
with open(os.path.join(os.path.dirname(sys.argv[0]), 'proxysettings.json')) as jsonConfig:
config = {}
config = load(jsonConfig)
if "quality" in config:
QUAL = config["quality"]
if "username" in config:
USER = config["username"]
if "password" in config:
PASS = config["password"]
if "server" in config:
SRVR = config["server"]
if "server_spare" in config:
SRVR_SPARE = config["server_spare"]
if "service" in config:
SITE = config["service"]
if SITE == "mmatv":
SITE = "viewmmasr"
if "stream" in config:
STRM = config["stream"]
if "kodiport" in config:
KODIPORT = config["kodiport"]
if "externalip" in config:
EXTIP = config["externalip"]
if "externalport" in config:
EXTPORT = config["externalport"]
if "ip" in config and "port" in config:
LISTEN_IP = config["ip"]
LISTEN_PORT = config["port"]
SERVER_HOST = "http://" + LISTEN_IP + ":" + str(LISTEN_PORT)
EXT_HOST = "http://" + EXTIP + ":" + str(EXTPORT)
logger.debug("Using config file.")
except:
if HEADLESS:
config = {}
config["username"] = input("Username?")
USER = config["username"]
config["password"] = input("Password?")
PASS = config["password"]
os.system('cls' if os.name == 'nt' else 'clear')
print("Type the number of the item you wish to select:")
for i in providerList:
print(providerList.index(i), providerList[providerList.index(i)][0])
config["service"] = providerList[int(input("Provider name?"))][1]
os.system('cls' if os.name == 'nt' else 'clear')
SITE = config["service"]
print("Type the number of the item you wish to select:")
for i in serverList:
print(serverList.index(i), serverList[serverList.index(i)][0])
result = input("Regional Server name? (or type 'auto')")
if result.lower() == 'auto':
testServers()
config["server"] = SRVR
config["server_spare"] = SRVR_SPARE
else:
config["server"] = serverList[int(result)][1]
os.system('cls' if os.name == 'nt' else 'clear')
for i in serverList:
print(serverList.index(i), serverList[serverList.index(i)][0])
result = input("Backup Regional Server name?")
config["server_spare"] = serverList[int(result)][1]
os.system('cls' if os.name == 'nt' else 'clear')
print("Type the number of the item you wish to select:")
for i in streamtype:
print(streamtype.index(i), i)
config["stream"] = streamtype[int(input("Dynamic Stream Type? (HLS/RTMP)"))]
os.system('cls' if os.name == 'nt' else 'clear')
for i in qualityList:
print(qualityList.index(i), qualityList[qualityList.index(i)][0])
config["quality"] = qualityList[int(input("Stream quality?"))][1]
os.system('cls' if os.name == 'nt' else 'clear')
config["ip"] = input("Listening IP address?(ie recommend 127.0.0.1 for beginners)")
config["port"] = int(input("and port?(ie 99, do not use 8080)"))
os.system('cls' if os.name == 'nt' else 'clear')
config["externalip"] = input("External IP?")
config["externalport"] = int(input("and ext port?(ie 99, do not use 8080)"))
os.system('cls' if os.name == 'nt' else 'clear')
QUAL = config["quality"]
SRVR = config["server"]
STRM = config["stream"]
LISTEN_IP = config["ip"]
LISTEN_PORT = config["port"]
EXTIP = config["externalip"]
EXTPORT = config["externalport"]
SERVER_HOST = "http://" + LISTEN_IP + ":" + str(LISTEN_PORT)
EXT_HOST = "http://" + EXTIP + ":" + str(EXTPORT)
with open(os.path.join(os.path.dirname(sys.argv[0]), 'proxysettings.json'), 'w') as fp:
dump(config, fp)
else:
root = tkinter.Tk()
root.title("YAP Setup")
# root.geometry('750x600')
app = GUI(root) # calling the class to run
root.mainloop()
installer()
adv_settings()
if args.install:
installer()
############################################################
# INSTALL
############################################################
def installer():
if os.path.isfile(os.path.join('/usr', 'bin', 'tv_find_grabbers')):
writetvGrabFile()
os.chmod('/usr/bin/tv_grab_sstv', 0o777)
proc = subprocess.Popen("/usr/bin/tv_find_grabbers")
if os.path.isdir(ADDONPATH):
writesettings()
writegenres()
if not os.path.isdir(os.path.join(os.path.dirname(sys.argv[0]), 'Templates')):
os.mkdir(os.path.join(os.path.dirname(sys.argv[0]), 'Templates'))
writetemplate()
def writetvGrabFile():
f = open(os.path.join('/usr', 'bin', 'tv_grab_sstv'), 'w')
tvGrabFile = '''#!/bin/sh
dflag=
vflag=
cflag=
#Save this file into /usr/bin ensure HTS user has read/write and the file is executable
URL="%s/%s/epg.xml"
DESCRIPTION="SmoothStreamsTV"
VERSION="1.1"
if [ $# -lt 1 ]; then
wget -q -O - $URL
exit 0
fi
for a in "$@"; do
[ "$a" = "-d" -o "$a" = "--description" ] && dflag=1
[ "$a" = "-v" -o "$a" = "--version" ] && vflag=1
[ "$a" = "-c" -o "$a" = "--capabilities" ] && cflag=1
done
if [ -n "$dflag" ]; then
echo $DESCRIPTION
fi
if [ -n "$vflag" ]; then
echo $VERSION
fi
if [ -n "$cflag" ]; then
echo "baseline"
fi''' % (SERVER_HOST, SERVER_PATH)
f.write(tvGrabFile)
f.close()
# lazy install, low priority
def writesettings():
f = open(os.path.join(ADDONPATH, 'settings.xml'), 'w')
xmldata = """<settings>
<setting id="epgCache" value="false" />
<setting id="epgPath" value="" />
<setting id="epgPathType" value="1" />
<setting id="epgTSOverride" value="true" />
<setting id="epgTimeShift" value="0.0" />
<setting id="epgUrl" value="%s/%s/epg.xml" />
<setting id="logoBaseUrl" value="" />
<setting id="logoFromEpg" value="1" />
<setting id="logoPath" value="" />
<setting id="logoPathType" value="1" />
<setting id="m3uCache" value="true" />
<setting id="m3uPath" value="" />
<setting id="m3uPathType" value="1" />
<setting id="m3uUrl" value="%s/%s/kodi.m3u8" />
<setting id="sep1" value="" />
<setting id="sep2" value="" />
<setting id="sep3" value="" />
<setting id="startNum" value="1" />
</settings>""" % (SERVER_HOST, SERVER_PATH, SERVER_HOST, SERVER_PATH)
f.write(xmldata)
f.close()
def writegenres():
f = open(os.path.join(ADDONPATH, 'genres.xml'), 'w')
xmldata = """<genres>
<!---UNDEFINED--->
<genre type="00">Undefined</genre>
<!---MOVIE/DRAMA--->
<genre type="16">Movie/Drama</genre>
<genre type="16" subtype="01">Detective/Thriller</genre>
<genre type="16" subtype="02">Adventure/Western/War</genre>
<genre type="16" subtype="03">Science Fiction/Fantasy/Horror</genre>
<genre type="16" subtype="04">Comedy</genre>
<genre type="16" subtype="05">Soap/Melodrama/Folkloric</genre>
<genre type="16" subtype="06">Romance</genre>
<genre type="16" subtype="07">Serious/Classical/Religious/Historical Movie/Drama</genre>
<genre type="16" subtype="08">Adult Movie/Drama</genre>
<!---NEWS/CURRENT AFFAIRS--->
<genre type="32">News/Current Affairs</genre>
<genre type="32" subtype="01">News/Weather Report</genre>
<genre type="32" subtype="02">News Magazine</genre>
<genre type="32" subtype="03">Documentary</genre>
<genre type="32" subtype="04">Discussion/Interview/Debate</genre>
<!---SHOW--->
<genre type="48">Show/Game Show</genre>
<genre type="48" subtype="01">Game Show/Quiz/Contest</genre>
<genre type="48" subtype="02">Variety Show</genre>
<genre type="48" subtype="03">Talk Show</genre>
<!---SPORTS--->
<genre type="64">Sports</genre>
<genre type="64" subtype="01">Special Event</genre>
<genre type="64" subtype="02">Sport Magazine</genre>
<genre type="96" subtype="03">Football</genre>
<genre type="144">Tennis/Squash</genre>
<genre type="64" subtype="05">Team Sports</genre>
<genre type="64" subtype="06">Athletics</genre>
<genre type="160">Motor Sport</genre>
<genre type="64" subtype="08">Water Sport</genre>
<genre type="64" subtype="09">Winter Sports</genre>
<genre type="64" subtype="10">Equestrian</genre>
<genre type="176">Martial Sports</genre>
<genre type="16">Basketball</genre>
<genre type="32">Baseball</genre>
<genre type="48">Soccer</genre>
<genre type="80">Ice Hockey</genre>
<genre type="112">Golf</genre>
<genre type="128">Cricket</genre>
<!---CHILDREN/YOUTH--->
<genre type="80">Children's/Youth Programmes</genre>
<genre type="80" subtype="01">Pre-school Children's Programmes</genre>
<genre type="80" subtype="02">Entertainment Programmes for 6 to 14</genre>
<genre type="80" subtype="03">Entertainment Programmes for 16 to 16</genre>
<genre type="80" subtype="04">Informational/Educational/School Programme</genre>
<genre type="80" subtype="05">Cartoons/Puppets</genre>
<!---MUSIC/BALLET/DANCE--->
<genre type="96">Music/Ballet/Dance</genre>
<genre type="96" subtype="01">Rock/Pop</genre>
<genre type="96" subtype="02">Serious/Classical Music</genre>
<genre type="96" subtype="03">Folk/Traditional Music</genre>
<genre type="96" subtype="04">Musical/Opera</genre>
<genre type="96" subtype="05">Ballet</genre>
<!---ARTS/CULTURE--->
<genre type="112">Arts/Culture</genre>
<genre type="112" subtype="01">Performing Arts</genre>
<genre type="112" subtype="02">Fine Arts</genre>
<genre type="112" subtype="03">Religion</genre>
<genre type="112" subtype="04">Popular Culture/Traditional Arts</genre>
<genre type="112" subtype="05">Literature</genre>
<genre type="112" subtype="06">Film/Cinema</genre>
<genre type="112" subtype="07">Experimental Film/Video</genre>
<genre type="112" subtype="08">Broadcasting/Press</genre>
<genre type="112" subtype="09">New Media</genre>
<genre type="112" subtype="10">Arts/Culture Magazines</genre>
<genre type="112" subtype="11">Fashion</genre>
<!---SOCIAL/POLITICAL/ECONOMICS--->
<genre type="128">Social/Political/Economics</genre>
<genre type="128" subtype="01">Magazines/Reports/Documentary</genre>
<genre type="128" subtype="02">Economics/Social Advisory</genre>
<genre type="128" subtype="03">Remarkable People</genre>
<!---EDUCATIONAL/SCIENCE--->
<genre type="144">Education/Science/Factual</genre>
<genre type="144" subtype="01">Nature/Animals/Environment</genre>
<genre type="144" subtype="02">Technology/Natural Sciences</genre>
<genre type="144" subtype="03">Medicine/Physiology/Psychology</genre>
<genre type="144" subtype="04">Foreign Countries/Expeditions</genre>
<genre type="144" subtype="05">Social/Spiritual Sciences</genre>
<genre type="144" subtype="06">Further Education</genre>
<genre type="144" subtype="07">Languages</genre>
<!---LEISURE/HOBBIES--->
<genre type="160">Leisure/Hobbies</genre>
<genre type="160" subtype="01">Tourism/Travel</genre>
<genre type="160" subtype="02">Handicraft</genre>
<genre type="160" subtype="03">Motoring</genre>
<genre type="160" subtype="04">Fitness & Health</genre>
<genre type="160" subtype="05">Cooking</genre>
<genre type="160" subtype="06">Advertisement/Shopping</genre>
<genre type="160" subtype="07">Gardening</genre>
<!---SPECIAL--->
<genre type="176">Special Characteristics</genre>
<genre type="176" subtype="01">Original Language</genre>
<genre type="176" subtype="02">Black & White</genre>
<genre type="176" subtype="03">Unpublished</genre>
<genre type="176" subtype="04">Live Broadcast</genre>
</genres>"""
f.write(xmldata)
f.close()
def writetemplate():
f = open(os.path.join(os.path.dirname(sys.argv[0]), 'Templates', 'device.xml'), 'w')
xmldata = """<root xmlns="urn:schemas-upnp-org:device-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<URLBase>{{ data.BaseURL }}</URLBase>
<device>
<deviceType>urn:schemas-upnp-org:device:MediaServer:1</deviceType>
<friendlyName>{{ data.FriendlyName }}</friendlyName>
<manufacturer>{{ data.Manufacturer }}</manufacturer>
<modelName>{{ data.ModelNumber }}</modelName>
<modelNumber>{{ data.ModelNumber }}</modelNumber>
<serialNumber></serialNumber>
<UDN>uuid:{{ data.DeviceID }}</UDN>
</device>
</root>"""
f.write(xmldata)
f.close()
############################################################
# INSTALL GUI
############################################################
if not HEADLESS:
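# Collapsible frame used for the 'Optional' and 'Advanced' sections; the +/- checkbutton shows or hides sub_frame.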
class ToggledFrame(tkinter.Frame):
def __init__(self, parent, text="", *args, **options):
tkinter.Frame.__init__(self, parent, *args, **options)
self.show = tkinter.IntVar()
self.show.set(0)
self.title_frame = tkinter.Frame(self)
self.title_frame.pack(fill="x", expand=1)
tkinter.Label(self.title_frame, text=text).pack(side="left", fill="x", expand=1)
self.toggle_button = tkinter.Checkbutton(self.title_frame, width=2, text='+', command=self.toggle,
variable=self.show)
self.toggle_button.pack(side="left")
self.sub_frame = tkinter.Frame(self, relief="sunken", borderwidth=1)
def toggle(self):
if bool(self.show.get()):
self.sub_frame.pack(fill="x", expand=1)
self.toggle_button.configure(text='-')
else:
self.sub_frame.forget()
self.toggle_button.configure(text='+')
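# First-run settings window; gather() saves the entered values to proxysettings.json and updates the module globals before launch.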
class GUI(tkinter.Frame):
def client_exit(self, root):
root.destroy()
def __init__(self, master):
tkinter.Frame.__init__(self, master)
self.t1 = tkinter.StringVar()
self.t1.set("Minimum Settings")
t1 = tkinter.Label(master, textvariable=self.t1, height=2)
t1.grid(row=1, column=2)
self.labelUsername = tkinter.StringVar()
self.labelUsername.set("Username")
labelUsername = tkinter.Label(master, textvariable=self.labelUsername, height=2)
labelUsername.grid(row=2, column=1)
#
userUsername = tkinter.StringVar()
userUsername.set("[email protected]")
self.username = tkinter.Entry(master, textvariable=userUsername, width=30)
self.username.grid(row=2, column=2)
#
self.noteUsername = tkinter.StringVar()
self.noteUsername.set("mystreams will not be an email address")
noteUsername = tkinter.Label(master, textvariable=self.noteUsername, height=2)
noteUsername.grid(row=2, column=3)
self.labelPassword = tkinter.StringVar()
self.labelPassword.set("Password")
labelPassword = tkinter.Label(master, textvariable=self.labelPassword, height=2)
labelPassword.grid(row=3, column=1)
#
userPassword = tkinter.StringVar()
userPassword.set("blogs123")
self.password = tkinter.Entry(master, textvariable=userPassword, width=30)
self.password.grid(row=3, column=2)
self.labelSite = tkinter.StringVar()
self.labelSite.set("Site")
labelSite = tkinter.Label(master, textvariable=self.labelSite, height=2)
labelSite.grid(row=4, column=1)
userSite = tkinter.StringVar()
userSite.set('StreamTVnow')
self.site = tkinter.OptionMenu(master, userSite, *[x[0] for x in providerList])
self.site.grid(row=4, column=2)
t2 = ToggledFrame(master, text='Optional', relief="raised", borderwidth=1)
t2.grid(row=5, column=1, columnspan=3)
self.labelServer = tkinter.StringVar()
self.labelServer.set("Server")
labelServer = tkinter.Label(t2.sub_frame, textvariable=self.labelServer, height=2)
labelServer.grid(row=1, column=1)
userServer = tkinter.StringVar()
userServer.set('Auto')
self.server = tkinter.OptionMenu(t2.sub_frame, userServer, *['Auto'] + [x[0] for x in serverList])
self.server.grid(row=1, column=2)
self.labelStream = tkinter.StringVar()
self.labelStream.set("Stream Type")
labelStream = tkinter.Label(t2.sub_frame, textvariable=self.labelStream, height=2)
labelStream.grid(row=2, column=1)
userStream = tkinter.StringVar()
userStream.set('HLS')
self.stream = tkinter.OptionMenu(t2.sub_frame, userStream, *[x.upper() for x in streamtype])
self.stream.grid(row=2, column=2)
self.labelQuality = tkinter.StringVar()
self.labelQuality.set("Quality")
labelQuality = tkinter.Label(t2.sub_frame, textvariable=self.labelQuality, height=2)
labelQuality.grid(row=3, column=1)
userQuality = tkinter.StringVar()
userQuality.set('HD')
self.quality = tkinter.OptionMenu(t2.sub_frame, userQuality, *[x[0] for x in qualityList])
self.quality.grid(row=3, column=2)
self.labelIP = tkinter.StringVar()
self.labelIP.set("Listen IP")
labelIP = tkinter.Label(t2.sub_frame, textvariable=self.labelIP, height=2)
labelIP.grid(row=4, column=1)
userIP = tkinter.StringVar()
userIP.set(LISTEN_IP)
self.ip = tkinter.Entry(t2.sub_frame, textvariable=userIP, width=30)
self.ip.grid(row=4, column=2)
self.noteIP = tkinter.StringVar()
self.noteIP.set("If using on other machines then set a static IP and use that.")
noteIP = tkinter.Label(t2.sub_frame, textvariable=self.noteIP, height=2)
noteIP.grid(row=4, column=3)
self.labelPort = tkinter.StringVar()
self.labelPort.set("Listen Port")
labelPort = tkinter.Label(t2.sub_frame, textvariable=self.labelPort, height=2)
labelPort.grid(row=5, column=1)
userPort = tkinter.IntVar()
userPort.set(LISTEN_PORT)
self.port = tkinter.Entry(t2.sub_frame, textvariable=userPort, width=30)
self.port.grid(row=5, column=2)
self.notePort = tkinter.StringVar()
self.notePort.set("If 80 doesn't work try 99")
notePort = tkinter.Label(t2.sub_frame, textvariable=self.notePort, height=2)
notePort.grid(row=5, column=3)
t3 = ToggledFrame(master, text='Advanced', relief="raised", borderwidth=1)
t3.grid(row=6, column=1, columnspan=3)
self.labelKodiPort = tkinter.StringVar()
self.labelKodiPort.set("KodiPort")
labelKodiPort = tkinter.Label(t3.sub_frame, textvariable=self.labelKodiPort, height=2)
labelKodiPort.grid(row=1, column=1)
userKodiPort = tkinter.IntVar(None)
userKodiPort.set(KODIPORT)
self.kodiport = tkinter.Entry(t3.sub_frame, textvariable=userKodiPort, width=30)
self.kodiport.grid(row=1, column=2)
self.noteKodiPort = tkinter.StringVar()
self.noteKodiPort.set("Only change if you've had to change the Kodi port")
noteKodiPort = tkinter.Label(t3.sub_frame, textvariable=self.noteKodiPort, height=2)
noteKodiPort.grid(row=1, column=3)
self.labelExternalIP = tkinter.StringVar()
self.labelExternalIP.set("External IP")
labelExternalIP = tkinter.Label(t3.sub_frame, textvariable=self.labelExternalIP, height=2)
labelExternalIP.grid(row=2, column=1)
userExternalIP = tkinter.StringVar()
userExternalIP.set(EXTIP)
self.externalip = tkinter.Entry(t3.sub_frame, textvariable=userExternalIP, width=30)
self.externalip.grid(row=2, column=2)
self.noteExternalIP = tkinter.StringVar()
self.noteExternalIP.set("Enter your public IP or Dynamic DNS,\nfor use when you wish to use this remotely.")
noteExternalIP = tkinter.Label(t3.sub_frame, textvariable=self.noteExternalIP, height=2)
noteExternalIP.grid(row=2, column=3)
self.labelExternalPort = tkinter.StringVar()
self.labelExternalPort.set("External Port")
labelExternalPort = tkinter.Label(t3.sub_frame, textvariable=self.labelExternalPort, height=2)
labelExternalPort.grid(row=3, column=1)
userExternalPort = tkinter.IntVar(None)
userExternalPort.set(EXTPORT)
self.extport = tkinter.Entry(t3.sub_frame, textvariable=userExternalPort, width=30)
self.extport.grid(row=3, column=2)
def gather():
global playlist, kodiplaylist, QUAL, QUALLIMIT, USER, PASS, SRVR, SITE, STRM, KODIPORT, LISTEN_IP, LISTEN_PORT, EXTIP, EXT_HOST, SERVER_HOST, EXTPORT
config = {}
config["username"] = userUsername.get()
config["password"] = userPassword.get()
config["stream"] = userStream.get().lower()
for sub in providerList:
if userSite.get() in sub[0]:
config["service"] = sub[1]
for sub in qualityList:
if userQuality.get() in sub[0]:
config["quality"] = sub[1]
config["ip"] = userIP.get()
config["port"] = userPort.get()
config["kodiport"] = userKodiPort.get()
config["externalip"] = userExternalIP.get()
config["externalport"] = userExternalPort.get()
QUAL = config["quality"]
USER = config["username"]
PASS = config["password"]
SITE = config["service"]
STRM = config["stream"]
KODIPORT = config["kodiport"]
LISTEN_IP = config["ip"]
LISTEN_PORT = config["port"]
EXTIP = config["externalip"]
EXTPORT = config["externalport"]
EXT_HOST = "http://" + EXTIP + ":" + str(EXTPORT)
SERVER_HOST = "http://" + LISTEN_IP + ":" + str(LISTEN_PORT)
if userServer.get() != 'Auto':
for sub in serverList:
if userServer.get() in sub[0]:
config["server"] = sub[1]
SRVR = config["server"]
else:
testServers(update_settings=True)
config["server"] = SRVR
config["server_spare"] = SRVR_SPARE
for widget in master.winfo_children():
widget.destroy()
with open(os.path.join(os.path.dirname(sys.argv[0]), 'proxysettings.json'), 'w') as fp:
dump(config, fp)
self.labelSetting1 = tkinter.StringVar()
self.labelSetting1.set(
"Open a web browser and go to %s for instructions and output URLs." % urljoin(SERVER_HOST,
SERVER_PATH))
labelSetting1 = tkinter.Label(master, textvariable=self.labelSetting1, height=2)
labelSetting1.grid(row=1)
self.labelFooter = tkinter.StringVar()
self.labelFooter.set("URLs can also be found later on the YAP main screen after each launch")
labelFooter = tkinter.Label(master, textvariable=self.labelFooter, height=4)
labelFooter.grid(row=2)
button1 = tkinter.Button(master, text="Launch YAP!!", width=20,
command=lambda: self.client_exit(master))
button1.grid(row=3)
button1 = tkinter.Button(master, text="Submit", width=20, command=lambda: gather())
button1.grid(row=7, column=1, columnspan=3)
############################################################
# MISC
############################################################
TOKEN_PATH = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'token.json')
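# Load the cached auth token from cache/token.json if present; otherwise write the current in-memory token out.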
def load_token():
global token
if os.path.exists(TOKEN_PATH):
with open(TOKEN_PATH, 'r') as fp:
token = load(fp)
logger.debug("Loaded token %r, expires at %s", token['hash'], token['expires'])
else:
dump_token()
def dump_token():
global token
with open(TOKEN_PATH, 'w') as fp:
dump(token, fp)
logger.debug("Dumped token.json")
def find_between(s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
return s[start:end]
except ValueError:
return ""
def dl_icons(channum):
# download icons to cache
logger.debug("Downloading icons")
icontemplate = 'https://guide.smoothstreams.tv/assets/images/channels/{0}.png'
# create blank icon
urllib.request.urlretrieve(icontemplate.format(150),
os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'empty.png'))
for i in range(1, channum + 1):
name = str(i) + '.png'
try:
urllib.request.urlretrieve(icontemplate.format(i),
os.path.join(os.path.dirname(sys.argv[0]), 'cache', name))
except:
continue
# logger.debug("No icon for channel:%s"% i)
logger.debug("Icon download completed.")
def thread_updater():
# todo
while True:
time.sleep(21600)
try:
latest_ver = float(json.loads(urllib.request.urlopen(url).read().decode('utf-8'))['Version'])
except:
latest_ver = float(0.0)
logger.info("Latest version check failed, check internet.")
if __version__ < latest_ver:
logger.info(
"Your version (%s%s) is out of date, the latest is %s, which has now be downloaded for you into the 'updates' subdirectory." % (
type, __version__, latest_ver))
newfilename = ntpath.basename(latestfile)
if not os.path.isdir(os.path.join(os.path.dirname(sys.argv[0]), 'updates')):
os.mkdir(os.path.join(os.path.dirname(sys.argv[0]), 'updates'))
urllib.request.urlretrieve(latestfile, os.path.join(os.path.dirname(sys.argv[0]), 'updates', newfilename))
def find_client(useragent):
if 'kodi' in useragent.lower():
return 'kodi'
elif 'vlc' in useragent.lower():
return 'vlc'
elif 'mozilla' in useragent.lower():
return 'browser'
elif 'dalvik' in useragent.lower():
return 'perfectplayer'
elif 'lavf' in useragent.lower():
return 'plex'
elif 'tvheadend' in useragent.lower():
return 'tvh'
elif 'apple tv' in useragent.lower():
return 'atv'
elif 'smarthub' in useragent.lower():
return 'samsung'
elif 'tv' in useragent.lower():
return 'tv'
else:
return 'unk'
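# Average the float-convertible entries of lst (used for ping/response-time samples).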
def averageList(lst):
logger.debug(repr(lst))
avg_ping = 0
avg_ping_cnt = 0
for p in lst:
try:
avg_ping += float(p)
avg_ping_cnt += 1
except:
logger.debug("Couldn't convert %s to float" % repr(p))
return avg_ping / avg_ping_cnt if avg_ping_cnt else 0
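# Time a test HLS request against every non-mix server; with update_settings the fastest host becomes SRVR and the runner-up SRVR_SPARE.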
def testServers(update_settings=True):
# todo
global SRVR, SRVR_SPARE, AUTO_SERVER
service = SRVR
fails = []
res = None
res_host = None
res_spare = None
res_spare_host = None
ping = False
check_token()
# with util.xbmcDialogProgress('Testing servers...') as prog:
for name, host in serverList:
if 'mix' in name.lower():
continue
logger.info('Testing servers... %s' % name)
ping_results = False
try:
url = "https://" + host + ".SmoothStreams.tv:443/" + SITE + "/ch01q1.stream/playlist.m3u8?wmsAuthSign=" + \
token['hash']
logger.debug('Testing url %s' % url)
# if platform.system() == 'Windows':
# p = subprocess.Popen(["ping", "-n", "4", url], stdout=subprocess.PIPE,
# stderr=subprocess.PIPE, shell=True)
# else:
# p = subprocess.Popen(["ping", "-c", "4", url], stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
#
# ping_results = re.compile("time=(.*?)ms").findall(str(p.communicate()[0]))
t1 = time.time()
response = requests.get(url)
t2 = time.time()
if response.status_code == 200:
ping_results = t2 - t1
else:
fails.append(name)
except:
logger.info("Platform doesn't support ping. Disable auto server selection")
AUTO_SERVER = False
return None
if ping_results:
logger.debug("Server %s - %s: n%s" % (name, host, ping_results))
avg_ping = ping_results
if avg_ping != 0:
if avg_ping < ping or not ping:
res_spare = res
res_spare_host = res_host
res = name
res_host = host
ping = avg_ping
if update_settings:
logger.info("Updating settings")
SRVR = str(host)
SRVR_SPARE = str(res_spare_host)
else:
logger.info("Couldn't get ping")
if res != None:
logger.info('Done. Server with lowest response time (%s) set to: %s' % (ping, res))
AUTO_SERVER = False
if res_spare != None:
logger.info('Backup Server with second lowest response time set to:%s' % res_spare)
logger.info("Done %s: %s" % (res, ping))
logger.debug("Failed to access servers: %s" % fails)
return res
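# Given a URL template containing SRVR and QUAL placeholders, time it against servers in the same region and return the fastest working URL, widening the search on failure.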
def findChannelURL(input_url=None, qual='1', target_serv=SRVR, fail=0):
# todo, rtmp
global SRVR
service = SRVR
qlist = [qual] # , '1', '2', '3']
res = None
ping = False
for q in range(len(qlist)):
if q != 0 and qlist[q] == qual:
continue
options = []
if target_serv.startswith('dna') and fail != 2:
if 'dnaw' in target_serv and fail == 0:
options = [(name, host) for (name, host) in serverList if host.startswith('dnaw')]
elif 'dnae' in target_serv and fail == 0:
options = [(name, host) for (name, host) in serverList if host.startswith('dnae')]
else:
options = [(name, host) for (name, host) in serverList if host.startswith('dna')]
elif target_serv.startswith('deu') and fail != 2:
if 'deu-nl' in target_serv and fail == 0:
options = [(name, host) for (name, host) in serverList if host.startswith('deu-nl')]
elif 'deu-uk' in target_serv and fail == 0:
options = [(name, host) for (name, host) in serverList if host.startswith('deu-uk')]
else:
options = [(name, host) for (name, host) in serverList if host.startswith('deu')]
else:
# asia
options = serverList
for name, host in options:
if 'mix' in name.lower():
continue
td = False
try:
url = input_url.replace('SRVR', host).replace('QUAL', qlist[q])
# url = find_between(url,"://",":")
logger.debug('Testing url %s' % url)
# if platform.system() == 'Windows':
# p = subprocess.Popen(["ping", "-n", "4", url], stdout=subprocess.PIPE,
# stderr=subprocess.PIPE, shell=True)
# else:
# p = subprocess.Popen(["ping", "-c", "4", url], stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
#
# ping_results = re.compile("time=(.*?)ms").findall(str(p.communicate()[0]))
t1 = time.time()
response = requests.get(url)
t2 = time.time()
if response.status_code == 200:
td = t2 - t1
except:
logger.info("Platform doesn't support ping. Disable auto server selection")
return None
if td:
logger.debug("Server %s - %s: %s" % (name, host, repr(td)))
avg_ping = td
if avg_ping != 0:
if avg_ping < ping or not ping:
res = url
res = input_url.replace('SRVR', host).replace('QUAL', qlist[q])
ping = avg_ping
else:
logger.info("Couldn't get ping")
if res != None:
logger.info('Done. Server with lowest ping (%s) set to: %s' % (ping, res))
return res
logger.info("Failed to find that channel on a similar quality or server")
if fail < 2:
return findChannelURL(input_url, qual='1', fail=fail + 1)
logger.info("Failed to find that channel on any quality or server")
return input_url
def launch_browser():
try:
import webbrowser
webbrowser.open('%s://%s:%i%s' % ('http', LISTEN_IP, LISTEN_PORT, '/sstv/index.html'))
except Exception as e:
logger.error(u"Could not launch browser: %s" % e)
############################################################
# EPG
############################################################
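# Download the EPG XML (Fog's altepg, a user-supplied override, or the SSTV feed on fallback), remap channel ids, escape text, tag sports categories and write cache/epg.xml and cache/sports.xml. Skips the download if the cached copy is newer than the current four-hour block.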
def dl_epg(source=1):
global chan_map, FALLBACK
# download epg xml
source = 2 if FALLBACK == True else 1
if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'epg.xml')):
existing = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'epg.xml')
cur_utc_hr = datetime.utcnow().replace(microsecond=0, second=0, minute=0).hour
target_utc_hr = (cur_utc_hr // 4) * 4
target_utc_datetime = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=target_utc_hr)
logger.debug("utc time is: %s, utc target time is: %s, file time is: %s" % (
datetime.utcnow(), target_utc_datetime, datetime.utcfromtimestamp(os.stat(existing).st_mtime)))
if os.path.isfile(existing) and os.stat(existing).st_mtime > target_utc_datetime.timestamp():
logger.debug("Skipping download of epg")
return
to_process = []
# override the xml with one of your own
if source == 1:
if OVRXML != '':
if OVRXML.startswith('http://') or OVRXML.startswith('https://'):
if OVRXML.endswith('.gz') or OVRXML.endswith('.gz?raw=1'):
urllib.request.urlretrieve(OVRXML,
os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawovrepg.xml.gz'))
unzipped = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawovrepg.xml.gz')
else:
urllib.request.urlretrieve(OVRXML,
os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawovrepg.xml'))
unzipped = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawovrepg.xml')
else:
unzipped = OVRXML
else:
logger.info("Downloading epg")
urllib.request.urlretrieve("https://fast-guide.smoothstreams.tv/altepg/xmltv5.xml.gz",
os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawepg.xml.gz'))
unzipped = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawepg.xml.gz')
to_process.append([unzipped, "epg.xml", 'fog' if OVRXML == '' else 'ovr'])
urllib.request.urlretrieve("https://fast-guide.smoothstreams.tv/feed.xml",
os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawsports.xml'))
unzippedsports = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawsports.xml')
to_process.append([unzippedsports, "sports.xml", 'sstv'])
else:
logger.info("Downloading sstv epg")
urllib.request.urlretrieve("https://fast-guide.smoothstreams.tv/feed.xml",
os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawepg.xml'))
unzipped = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawepg.xml')
to_process.append([unzipped, "epg.xml", 'sstv'])
to_process.append([unzipped, "sports.xml", 'sstv'])
for process in to_process:
# try to categorise the sports events
try:
if process[0].endswith('.gz'):
opened = gzip.open(process[0])
else:
opened = open(process[0], encoding="UTF-8")
source = ET.parse(opened)
root = source.getroot()
changelist = {}
with open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'prep.xml'), 'w+') as f:
f.write('<?xml version="1.0" encoding="UTF-8"?>'.rstrip('\r\n'))
f.write(
'''<tv><channel id="static_refresh"><display-name lang="en">Static Refresh</display-name><icon src="http://speed.guide.smoothstreams.tv/assets/images/channels/150.png" /></channel><programme channel="static_refresh" start="20170118213000 +0000" stop="20201118233000 +0000"><title lang="us">Press to refresh rtmp channels</title><desc lang="en">Select this channel in order to refresh the RTMP playlist. Only use from the channels list and NOT the guide page. Required every 4hrs.</desc><category lang="us">Other</category><episode-num system="">1</episode-num></programme></tv>''')
desttree = ET.parse(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'prep.xml'))
desttreeroot = desttree.getroot()
for channel in source.iter('channel'):
if process[2] == 'fog':
b = channel.find('display-name')
newname = [chan_map[x].channum for x in range(len(chan_map) + 1) if
x != 0 and chan_map[x].epg == channel.attrib['id'] and chan_map[x].channame == b.text]
if len(newname) > 1:
logger.debug("EPG rename conflict %s" % ",".join(newname))
# It's a list regardless of length so first item is always wanted.
newname = newname[0]
changelist[channel.attrib['id']] = newname
channel.attrib['id'] = newname
b = channel.find('display-name')
b.text = saxutils.escape(str(b.text))
desttreeroot.append(channel)
for programme in source.iter('programme'):
if process[2] == 'fog':
try:
programme.attrib['channel'] = changelist[programme.attrib['channel']]
except:
logger.info("A programme was skipped as it couldn't be assigned to a channel, refer log.")
logger.debug(programme.find('title').text, programme.attrib)
desc = programme.find('desc')
if desc is None:
ET.SubElement(programme, 'desc')
desc = programme.find('desc')
desc.text = ""
elif desc.text == 'None':
desc.text = ""
else:
desc.text = saxutils.escape(str(desc.text))
sub = programme.find('sub-title')
if sub is None:
ET.SubElement(programme, 'sub-title')
sub = programme.find('sub-title')
sub.text = ""
else:
sub.text = saxutils.escape(str(sub.text))
title = programme.find('title')
title.text = saxutils.escape(str(title.text))
cat = programme.find('category')
if cat is None:
ET.SubElement(programme, 'category')
cat = programme.find('category')
if process[2] == 'sstv':
cat.text = 'Sports'
ep_num = programme.find('episode-num')
# emby
# sports|basketball|baseball|football|Rugby|Soccer|Cricket|Tennis/Squash|Motor Sport|Golf|Martial Sports|Ice Hockey|Alpine Sports|Darts
if cat.text == "Sports":
if any(sport in title.text.lower() for sport in
['nba', 'ncaam', 'basquetebol', 'wnba', 'g-league']):
cat.text = "Basketball"
elif any(sport in title.text.lower() for sport in
['nfl', 'football', 'american football', 'ncaaf', 'cfb']):
cat.text = "Football"
elif any(sport in title.text.lower() for sport in
['epl', 'efl', 'fa cup', 'spl', 'taca de portugal', 'w-league', 'soccer', 'ucl',
'coupe de la ligue', 'league cup', 'mls', 'uefa', 'fifa', 'fc', 'la liga', 'serie a',
'wcq', 'khl:', 'shl:', '1.bl:', 'euroleague', 'knvb', 'superliga turca',
'liga holandesa']):
cat.text = "Soccer"
elif any(sport in title.text.lower() for sport in
['rugby', 'nrl', 'afl', 'rfu', 'french top 14:', "women's premier 15", 'guinness pro14']):
cat.text = "Rugby"
elif any(sport in title.text.lower() for sport in ['cricket', 't20', 't20i']):
cat.text = "Cricket"
elif any(sport in title.text.lower() for sport in ['tennis', 'squash', 'atp']):
cat.text = "Tennis/Squash"
elif any(sport in title.text.lower() for sport in ['f1', 'nascar', 'motogp', 'racing']):
cat.text = "Motor Sport"
elif any(sport in title.text.lower() for sport in ['golf', 'pga']):
cat.text = "Golf"
elif any(sport in title.text.lower() for sport in ['boxing', 'mma', 'ufc', 'wrestling', 'wwe']):
cat.text = "Martial Sports"
elif any(sport in title.text.lower() for sport in ['hockey', 'nhl', 'ice hockey', 'iihf']):
cat.text = "Ice Hockey"
elif any(sport in title.text.lower() for sport in
['baseball', 'mlb', 'beisbol', 'minor league', 'ncaab']):
cat.text = "Baseball"
elif any(sport in title.text.lower() for sport in ['news']):
cat.text = "News"
elif any(sport in title.text.lower() for sport in ['alpine', 'skiing', 'snow']):
cat.text = "Alpine Sports"
elif any(sport in title.text.lower() for sport in ['darts']):
cat.text = "Darts"
cat.text = saxutils.escape(str(cat.text))
desttreeroot.append(programme)
# tree.write('./cache/combined.xml')
# with open('./cache/combined.xml', 'r+') as f:
# content = f.read()
# f.seek(0, 0)
# f.write('<?xml version="1.0" encoding="UTF-8"?>'.rstrip('\r\n') + content)
# return
desttree.write(os.path.join(os.path.dirname(sys.argv[0]), 'cache', process[1]))
logger.debug("writing to %s" % process[1])
# add xml header to file for Kodi support
with open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', process[1]), 'r+') as f:
content = f.read()
# staticinfo = '''<channel id="static_refresh"><display-name lang="en">Static Refresh</display-name><icon src="http://speed.guide.smoothstreams.tv/assets/images/channels/150.png" /></channel><programme channel="static_refresh" start="20170118213000 +0000" stop="20201118233000 +0000"><title lang="us">Press to refresh rtmp channels</title><desc lang="en">Select this channel in order to refresh the RTMP playlist. Only use from the channels list and NOT the guide page. Required every 4hrs.</desc><category lang="us">Other</category><episode-num system="">1</episode-num></programme></tv>'''
# content = content[:-5] + staticinfo
# f.write(content)
f.seek(0, 0)
f.write('<?xml version="1.0" encoding="UTF-8"?>'.rstrip('\r\n') + content)
except:
logger.exception(process[0])
if process[0] == "I:\\Video\\epg\\xmltv5.xml or URL":
logger.info("Proxy failed to parse the example XMLTV provided by the EXAMPLE advancedsettings.json")
else:
logger.info("Proxy failed to parse the XMLTV from %s" % process[0])
# started to create epg based off of the json but not needed
def dl_sstv_epg():
# download epg xml
# https://guide.smoothstreams.tv/feed-new-full-latest.zip
if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'sstv_full.xml')):
existing = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'sstv_full.xml')
cur_utc_hr = datetime.utcnow().replace(microsecond=0, second=0, minute=0).hour
target_utc_hr = (cur_utc_hr // 3) * 3
target_utc_datetime = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=target_utc_hr)
logger.debug("utc time is: %s, utc target time is: %s, file time is: %s" % (
datetime.utcnow(), target_utc_datetime, datetime.utcfromtimestamp(os.stat(existing).st_mtime)))
if os.path.isfile(existing) and os.stat(existing).st_mtime > target_utc_datetime.timestamp():
logger.debug("Skipping download of epg")
return
logger.debug("Downloading sstv epg")
url = "https://guide.smoothstreams.tv/feed-new-full-latest.zip"
import zipfile
urllib.request.urlretrieve(url, os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'testepg.zip'))
archive = zipfile.ZipFile(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'testepg.zip'), 'r')
jsonepg = archive.read('feed-new-full.json')
epg = json.loads(jsonepg.decode('utf-8'))
json2xml(epg)
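# Convert the full SSTV JSON guide into XMLTV, guessing a sports category from each programme name, and write cache/sstv_full.xml.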
def json2xml(json_obj):
master = ET.Element('tv')
mtree = ET.ElementTree(master)
mroot = mtree.getroot()
data = json_obj.get('data')
for i, j in data.items():
displayname = j['name']
id = j['number']
icon = j['img']
subelement = ET.SubElement(master, 'channel', {'id': id})
ET.SubElement(subelement, 'icon', {'src': icon})
ET.SubElement(subelement, 'display-name')
c = subelement.find('display-name')
c.text = displayname
# input
# '''"851224591": {"name": "SportsCenter With Scott Van Pelt",
# "description": "Scott Van Pelt presents the day in sports through his unique perspective with highlights, special guests and his ``One Big Thing'' commentary.",
# "time": "1515232800", "runtime": 60, "version": "", "language": "us", "channel": "83",
# "category": 0, "parent_id": "0", "quality": "HQLQ", "source": "XMLTV"}'''
# sample output from fog
# <programme channel="I58690.labs.zap2it.com" start="20180105170000 +0000" stop="20180105180000 +0000">
# <title lang="en">NHL Hockey Central</title>
# <desc lang="en">News and highlights from around the NHL.</desc>
# <category lang="en">Ice Hockey</category><
# episode-num system="">EP02022073.0008</episode-num></programme>
for event in j['events']:
program = j['events'][event]
category = ""
if 'nba' in program['name'].lower() or 'ncaam' in program['name'].lower():
category = "Basketball"
elif 'nfl' in program['name'].lower() or 'football' in program['name'].lower() or 'american football' in \
program['name'].lower() or 'ncaaf' in program['name'].lower() or 'cfb' in program['name'].lower():
category = "Football"
elif 'epl' in program['name'].lower() or 'efl' in program['name'].lower() or 'soccer' in program[
'name'].lower() or 'ucl' in program['name'].lower() or 'mls' in program['name'].lower() or 'uefa' in \
program['name'].lower() or 'fifa' in program['name'].lower() or 'fc' in program[
'name'].lower() or 'la liga' in program['name'].lower() or 'serie a' in program[
'name'].lower() or 'wcq' in program['name'].lower():
category = "Soccer"
elif 'rugby' in program['name'].lower() or 'nrl' in program['name'].lower() or 'afl' in program[
'name'].lower():
category = "Rugby"
elif 'cricket' in program['name'].lower() or 't20' in program['name'].lower():
category = "Cricket"
elif 'tennis' in program['name'].lower() or 'squash' in program['name'].lower() or 'atp' in program[
'name'].lower():
category = "Tennis/Squash"
elif 'f1' in program['name'].lower() or 'nascar' in program['name'].lower() or 'motogp' in program[
'name'].lower() or 'racing' in program['name'].lower():
category = "Motor Sport"
elif 'golf' in program['name'].lower() or 'pga' in program['name'].lower():
category = "Golf"
elif 'boxing' in program['name'].lower() or 'mma' in program['name'].lower() or 'ufc' in program[
'name'].lower() or 'wrestling' in program['name'].lower() or 'wwe' in program['name'].lower():
category = "Martial Sports"
elif 'hockey' in program['name'].lower() or 'nhl' in program['name'].lower() or 'ice hockey' in program[
'name'].lower():
category = "Ice Hockey"
elif 'baseball' in program['name'].lower() or 'mlb' in program['name'].lower() or 'beisbol' in program[
'name'].lower() or 'minor league' in program['name'].lower():
category = "Baseball"
start = datetime.utcfromtimestamp(int(program['time'])).strftime('%Y%m%d%H%M%S +0000')
stop = datetime.utcfromtimestamp(int(program['time']) + 60 * int(program['runtime'])).strftime(
'%Y%m%d%H%M%S +0000')
subelement = ET.SubElement(master, 'programme', {'id': id, 'start': start, 'stop': stop})
p_title = ET.SubElement(subelement, 'title', {'lang': program['language']})
p_title.text = program['name']
p_desc = ET.SubElement(subelement, 'desc', {'lang': program['language']})
p_desc.text = program['description']
p_genre = ET.SubElement(subelement, 'category', {'lang': program['language']})
p_genre.text = category
mtree.write(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'sstv_full.xml'))
return
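# Return a programinfo for whatever is airing on the given channel (GUIDELOOKAHEAD minutes ahead), checking jsonGuide1 first and then jsonGuide2 unless sports=True.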
def getProgram(channel, sports=False):
global jsonGuide1, jsonGuide2
tmNow = time.localtime(time.time() + GUIDELOOKAHEAD * 60)
sched_offest = EST5EDT().utc_seconds()
retVal = programinfo()
local_off = datetime.utcoffset(
datetime.utcnow().replace(tzinfo=dt.timezone.utc).astimezone(tz=None)).total_seconds()
if str(int(channel)) in jsonGuide1:
oChannel = jsonGuide1[str(int(channel))]
retVal.channel = channel
retVal.channelname = oChannel["name"].replace(format(channel, "02") + " - ", "").strip()
for item in oChannel["items"]:
startTime = time.localtime(time.mktime(
datetime.strptime(item["time"], '%Y-%m-%d %H:%M:%S').timetuple()) - sched_offest + local_off)
endTime = time.localtime(time.mktime(
datetime.strptime(item["end_time"], '%Y-%m-%d %H:%M:%S').timetuple()) - sched_offest + local_off)
if startTime < tmNow and endTime > tmNow:
retVal.category = item["category"].strip()
retVal.quality = item["quality"].upper()
retVal.language = item["language"].upper()
retVal.title = item["name"].strip()
retVal.description = item["description"].strip()
retVal.channel = channel
retVal.startTime = startTime
retVal.endTime = endTime
retVal.timeRange = time.strftime("%H:%M", startTime) + "-" + time.strftime("%H:%M", endTime)
return retVal
if not sports and str(int(channel)) in jsonGuide2:
oChannel = jsonGuide2[str(int(channel))]
retVal.channel = channel
retVal.channelname = oChannel["name"].replace(format(channel, "02") + " - ", "").strip()
for item in oChannel["items"]:
startTime = time.strptime(item["time"], '%Y-%m-%d %H:%M:%S')
endTime = time.strptime(item["end_time"], '%Y-%m-%d %H:%M:%S')
if startTime < tmNow and endTime > tmNow:
retVal.category = item["category"].strip()
retVal.quality = item["quality"].upper()
retVal.language = item["language"].upper()
retVal.title = item["name"].strip()
retVal.description = item["description"].strip()
retVal.channel = channel
retVal.startTime = startTime
retVal.endTime = endTime
retVal.timeRange = time.strftime("%H:%M", startTime) + "-" + time.strftime("%H:%M", endTime)
return retVal
return retVal
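# Return parsed JSON: use the cached file if it is under an hour old, otherwise try the primary then the fallback URL and refresh the cache.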
def getJSON(sFile, sURL, sURL2):
try:
if os.path.isfile(sFile) and time.time() - os.stat(sFile).st_mtime < 3600:
retVal = json.loads(open(sFile, 'r').read())
return retVal
except:
pass
try:
sJSON = urllib.request.urlopen(sURL).read().decode("utf-8")
retVal = json.loads(sJSON)
except:
try:
sJSON = urllib.request.urlopen(sURL2).read().decode("utf-8")
retVal = json.loads(sJSON)
except:
return json.loads("{}")
try:
file = open(sFile, "w+")
file.write(sJSON)
file.close()
except:
pass
return retVal
############################################################
# SSTV
############################################################
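# Request an auth hash from the provider's login endpoint (MMA-TV for viewmmasr/mmatv, SmoothStreams otherwise; Vaders needs no hash) and store it with its expiry in the token dict.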
def get_auth_token(user, passwd, site):
if site == 'vaders':
baseUrl = "http://vapi.vaders.tv/vod/user?"
# will return userinfo but not hash, hash is just user+pass hashed together, refer playlist generation
return
elif site == 'viewmmasr' or site == 'mmatv':
baseUrl = 'https://www.mma-tv.net/loginForm.php?'
else:
baseUrl = 'https://auth.smoothstreams.tv/hash_api.php?'
params = {
"username": user,
"password": passwd,
"site": site
}
headers = {'User-Agent': USERAGENT}
session = requests.Session()
url = baseUrl + urllib.parse.urlencode(params)
try:
data = session.post(url, params, headers=headers).json()
except:
data = json.loads(urllib.request.urlopen(url).read().decode("utf--8"))
# old
# data = json.loads(urllib.request.urlopen('http://auth.SmoothStreams.tv/hash_api.php?username=%s&password=%s&site=%s' % (user,passwd,site)).read().decode("utf-8"))
if 'hash' not in data or 'valid' not in data:
logger.error("There was no hash auth token returned from auth.SmoothStreams.tv...")
return
else:
token['hash'] = data['hash']
token['expires'] = (datetime.now() + timedelta(minutes=data['valid'])).strftime("%Y-%m-%d %H:%M:%S.%f")
logger.info("Retrieved token %r, expires at %s", token['hash'], token['expires'])
return
def check_token():
if SITE == 'vaders':
return
# load and check/renew token
if not token['hash'] or not token['expires']:
# fetch fresh token
logger.info("There was no token loaded, retrieving your first token...")
get_auth_token(USER, PASS, SITE)
dump_token()
else:
# check / renew token
if datetime.now() > datetime.strptime(token['expires'], "%Y-%m-%d %H:%M:%S.%f"):
# token is expired, renew
logger.info("Token has expired, retrieving a new one...")
get_auth_token(USER, PASS, SITE)
dump_token()
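# Build the channel number -> channelinfo map from Fog's channels.json.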
def build_channel_map():
chan_map = {}
logger.debug("Loading channel list")
url = 'https://fast-guide.smoothstreams.tv/altepg/channels.json'
jsonChanList = json.loads(urllib.request.urlopen(url).read().decode("utf-8"))
for item in jsonChanList:
retVal = channelinfo()
oChannel = jsonChanList[item]
retVal.channum = oChannel["channum"]
channel = int(oChannel["channum"])
retVal.channame = oChannel["channame"].replace(format(channel, "02") + " - ", "").strip()
if retVal.channame == 'Empty':
retVal.channame = retVal.channum
retVal.epg = oChannel["xmltvid"]
chan_map[channel] = {}
chan_map[channel] = retVal
logger.debug("Built channel map with %d channels", len(chan_map))
return chan_map
def build_channel_map_sstv():
chan_map = {}
logger.debug("Loading channel list (fallback)")
url = 'https://speed.guide.smoothstreams.tv/feed-new.json'
jsonChanList = json.loads(urllib.request.urlopen(url).read().decode("utf-8"))
jsonEPG = jsonChanList['data']
for item in jsonEPG:
retVal = channelinfo()
oChannel = jsonEPG[item]
retVal.channum = oChannel["number"]
channel = int(oChannel["number"])
retVal.channame = oChannel["name"].replace(format(channel, "02") + " - ", "").strip()
if retVal.channame == 'Empty':
retVal.channame = retVal.channum
retVal.epg = oChannel["number"]
chan_map[channel] = {}
chan_map[channel] = retVal
logger.debug("Built channel map with %d channels", len(chan_map))
return chan_map
def build_playlist(host, strmType=None):
# standard dynamic playlist
global chan_map
if not strmType or strmType not in streamtype: strmType = STRM
# build playlist using the data we have
new_playlist = "#EXTM3U x-tvg-url='%s/epg.xml'\n" % urljoin(host, SERVER_PATH)
for pos in range(1, len(chan_map) + 1):
try:
# build channel url
url = "{0}/playlist.m3u8?ch={1}&strm={2}&qual={3}"
if strmType == 'mpegts':
url = "{0}/mpeg.2ts?ch={1}&strm={2}&qual={3}"
vaders_url = "http://vapi.vaders.tv/play/{0}.{1}?"
if SITE == 'vaders':
tokenDict = {"username": "vsmystreams_" + USER, "password": PASS}
jsonToken = json.dumps(tokenDict)
tokens = base64.b64encode(jsonToken.encode('utf-8'))
strm = 'ts' if STRM == 'mpegts' else 'm3u8'
tokens = urllib.parse.urlencode({"token": str(tokens)[1:]})
channel_url = vaders_url.format(vaders_channels[str(pos)], strm) + tokens
else:
urlformatted = url.format(SERVER_PATH, chan_map[pos].channum, strmType, QUAL)
channel_url = urljoin(host, urlformatted)
# build playlist entry
new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="%s" tvg-logo="%s/%s/%s.png" channel-id="%s",%s\n' % (
chan_map[pos].channum, chan_map[pos].channame, host, SERVER_PATH, chan_map[pos].channum,
chan_map[pos].channum,
chan_map[pos].channame)
new_playlist += '%s\n' % channel_url
except:
logger.exception("Channel #%s failed. Channel missing from Fog's channels.json" % pos)
logger.info("Built Dynamic playlist")
return new_playlist
def build_sports_playlist(host, strmType=None):
global chan_map
if not strmType or strmType not in streamtype: strmType = STRM
new_playlist = "#EXTM3U x-tvg-url='%s/epg.xml'\n" % urljoin(host, SERVER_PATH)
for pos in range(1, len(chan_map) + 1):
try:
prog = getProgram(pos, sports=True)
# build channel url
url = "{0}/playlist.m3u8?ch={1}&strm={2}&qual={3}"
if strmType == 'mpegts':
url = "{0}/mpeg.2ts?ch={1}&strm={2}&qual={3}"
urlformatted = url.format(SERVER_PATH, chan_map[pos].channum, strmType, QUAL)
channel_url = urljoin(host, urlformatted)
# build playlist entry
group = prog.category
new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="%s" tvg-logo="%s/%s/%s.png" channel-id="%s" group-title="%s",%s\n' % (
chan_map[pos].channum, chan_map[pos].channame, host, SERVER_PATH, chan_map[pos].channum,
chan_map[pos].channum, group, chan_map[pos].channame)
new_playlist += '%s\n' % channel_url
except:
logger.exception("Channel #%s failed. Channel missing from Fog's channels.json" % pos)
logger.info("Built Dynamic playlist")
return new_playlist
def build_xspf(host, request_file):
# standard dynamic playlist
global chan_map
xspfBodyTemplate = ('<?xml version="1.0" encoding="UTF-8"?>\n' +
'<playlist xmlns="http://xspf.org/ns/0/" xmlns:vlc="http://www.videolan.org/vlc/playlist/ns/0/" version="1">\n' +
'\t<title>Playlist</title>\n' +
'\t<trackList>\n' +
'{0}' +
'\t</trackList>\n' +
'\t<extension application="http://www.videolan.org/vlc/playlist/0">\n' +
'{1}' +
'\t</extension>\n' +
'</playlist>')
xspfTrackTemplate = ('\t\t<track>\n' +
'\t\t\t<location>{5}</location>\n' +
'\t\t\t<title>{3}</title>\n' +
'\t\t\t<creator>{8}</creator>\n' +
'\t\t\t<album>{0}</album>\n' +
'\t\t\t<trackNum>{6}</trackNum>\n' +
'\t\t\t<annotation>{9}</annotation>\n' +
'\t\t\t<extension application="http://www.videolan.org/vlc/playlist/0">\n' +
'\t\t\t\t<vlc:id>{7}</vlc:id>\n' +
'\t\t\t</extension>\n' +
'\t\t</track>\n')
xspfTrack2Template = '\t\t<vlc:item tid="{0}"/>\n'
xspfTracks = ""
xspfTracks2 = ""
# build playlist using the data we have
for pos in range(1, len(chan_map) + 1):
# build channel url
program = getProgram(pos)
url = "{0}/playlist.m3u8?ch={1}"
vaders_url = "http://vapi.vaders.tv/play/{0}.{1}?"
# quality = '720p' if QUAL == '1' or pos > QUALLIMIT else '540p' if QUAL == '2' else '360p'
if SITE == 'vaders':
tokenDict = {"username": "vsmystreams_" + USER, "password": PASS}
jsonToken = json.dumps(tokenDict)
tokens = base64.b64encode(jsonToken.encode('utf-8'))
strm = 'ts' if STRM == 'mpegts' else 'm3u8'
tokens = urllib.parse.urlencode({"token": str(tokens)[1:]})
channel_url = vaders_url.format(vaders_channels[str(pos)], strm) + tokens
else:
urlformatted = url.format(SERVER_PATH, chan_map[pos].channum)
template = '{0}://{1}.smoothstreams.tv:{2}/{3}/ch{4}q{5}.stream{6}?wmsAuthSign={7}'
if not 'static' in request_file:
channel_url = urljoin(host, urlformatted)
else:
channel_url = createURL(pos, QUAL, STRM, token)
# channel_url = template.format('https' if STRM == 'hls' else 'rtmp', SRVR, '443' if STRM == 'hls' else '3625',
# SITE, "{:02}".format(pos), QUAL if pos <= QUALLIMIT else '1',
# '/playlist.m3u8' if STRM == 'hls' else '', token['hash'])
# build playlist entry
try:
xspfTracks += xspfTrackTemplate.format(escape(program.album), escape(program.quality),
escape(program.language), escape(program.title),
str(program.channel), channel_url,
str(int(chan_map[pos].channum)),
str(int(chan_map[pos].channum) - 1),
escape(program.channelname), escape(program.description))
xspfTracks2 += xspfTrack2Template.format(str(int(chan_map[pos].channum) - 1))
except:
logger.exception("Exception while updating playlist: ")
xspf = xspfBodyTemplate.format(xspfTracks, xspfTracks2)
logger.debug("Built xspf playlist")
return xspf
def build_static_playlist(strmType=None):
global chan_map
if not strmType or strmType not in streamtype:
strmType = STRM
# build playlist using the data we have
new_playlist = "#EXTM3U x-tvg-url='%s/epg.xml'\n" % urljoin(SERVER_HOST, SERVER_PATH)
for pos in range(1, len(chan_map) + 1):
# build channel url
# template = '{0}://{1}.smoothstreams.tv:{2}/{3}/ch{4}q{5}.stream{6}?wmsAuthSign={7}'
# urlformatted = template.format('https' if STRM == 'hls' else 'rtmp', SRVR, '443' if STRM == 'hls' else '3625',
# SITE, "{:02}".format(pos), QUAL if pos <= QUALLIMIT else '1',
# '/playlist.m3u8' if STRM == 'hls' else '', token['hash'])
# build playlist entry
try:
new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="%s" tvg-logo="%s/%s/%s.png" channel-id="%s",%s\n' % (
chan_map[pos].channum, chan_map[pos].channame, SERVER_HOST, SERVER_PATH, chan_map[pos].channum,
chan_map[pos].channum,
chan_map[pos].channame)
new_playlist += '%s\n' % createURL(pos, QUAL, strmType, token)
except:
logger.exception("Exception while updating static playlist: ")
logger.info("Built static playlist")
return new_playlist
def build_test_playlist(hosts):
# build playlist using the data we have
new_playlist = "#EXTM3U x-tvg-url='%s/epg.xml'\n" % urljoin(SERVER_HOST, SERVER_PATH)
template = '{0}://{1}.smoothstreams.tv:{2}/{3}/ch{4}q{5}.stream{6}?wmsAuthSign={7}'
url = "{0}/sstv/playlist.m3u8?ch=1&strm=hls&qual=1&type={1}"
# build playlist entry
new_playlist += '#EXTINF:-1 tvg-id="1" tvg-name="Static HLS" channel-id="1","Static HLS"\n'
new_playlist += '%s\n' % template.format('https', 'dnaw1', '443', SITE, "01", 1, '/playlist.m3u8', token['hash'])
new_playlist += '#EXTINF:-1 tvg-id="2" tvg-name="Static RTMP" channel-id="2","Static RTMP"\n'
new_playlist += '%s\n' % template.format('rtmp', 'dnaw1', '3625', SITE, "01", 1, '', token['hash'])
count = 3
for host in hosts:
new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="Redirect" channel-id="%s","Redirect"\n' % (count, count)
new_playlist += '%s\n' % url.format(host, '1')
count += 1
new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="File" channel-id="%s","File"\n' % (count, count)
new_playlist += '%s\n' % url.format(host, '2')
count += 1
new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="Variable" channel-id="%s","Variable"\n' % (count, count)
new_playlist += '%s\n' % url.format(host, '3')
count += 1
new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="URL" channel-id="%s","URL"\n' % (count, count)
new_playlist += '%s\n' % url.format(host, '4')
count += 1
logger.info("Built static playlist")
return new_playlist
def build_server_playlist():
# build playlist using the data we have
new_playlist = "#EXTM3U x-tvg-url='%s/epg.xml'\n" % urljoin(SERVER_HOST, SERVER_PATH)
template = '{0}://{1}.smoothstreams.tv:{2}/{3}/ch{4}q{5}.stream{6}?wmsAuthSign={7}'
url = "{0}/sstv/playlist.m3u8?ch=1&strm=mpeg&qual=1"
# build playlist entry
for server in serverList:
new_playlist += '#EXTINF:-1 tvg-id="1" tvg-name="%s" channel-id="1","%s"\n' % (server[0], server[0])
new_playlist += '%s\n' % template.format('https', server[1], '443', SITE, "01", 1, '/mpeg.2ts', token['hash'])
logger.info("Built server playlist")
return new_playlist
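# Background thread: rebuild the dynamic playlist every 24 hours.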
def thread_playlist():
global playlist
while True:
time.sleep(86400)
logger.info("Updating playlist...")
try:
tmp_playlist = build_playlist(SERVER_HOST)
playlist = tmp_playlist
logger.info("Updated playlist!")
except:
logger.exception("Exception while updating playlist: ")
def create_channel_playlist(sanitized_channel, qual, strm, hash):
rtmpTemplate = 'rtmp://{0}.smoothstreams.tv:3625/{1}/ch{2}q{3}.stream?wmsAuthSign={4}'
hlsTemplate = 'https://{0}.smoothstreams.tv:443/{1}/ch{2}q{3}.stream/playlist.m3u8?wmsAuthSign={4}'
hls_url = hlsTemplate.format(SRVR, SITE, sanitized_channel, qual, hash)
rtmp_url = rtmpTemplate.format(SRVR, SITE, sanitized_channel, qual, hash)
file = urllib.request.urlopen(hls_url, timeout=2).read().decode("utf-8")
if not os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'playlist.m3u8')):
f = open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'playlist.m3u8'), 'w')
f.close()
if strm == 'hls':
# Used to support HLS HTTPS urllib.request
template = 'https://{0}.smoothstreams.tv:443/{1}/ch{2}q{3}.stream/chunks'
file = file.replace('chunks', template.format(SRVR, SITE, sanitized_channel, qual))
with open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'playlist.m3u8'), 'r+') as f:
f.write(file)
return file
else:
# not used currently
template = 'http://{0}.smoothstreams.tv:9100/{1}/ch{2}q{3}.stream/chunks'
file = '#EXTM3U\n#EXTINF:' + file[43:110] + "\n" + rtmp_url
# with open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'playlist.m3u8'), 'r+') as f:
# f.write(file)
return rtmp_url
def create_channel_file(url):
strm = 'hls'
if url.startswith('rtmp'):
strm = 'rtmp'
file = urllib.request.urlopen(url, timeout=2).read().decode("utf-8")
if not os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'playlist.m3u8')):
f = open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'playlist.m3u8'), 'w')
f.close()
if strm == 'hls':
# Used to support HLS HTTPS urllib.request
# https://dnaw1.smoothstreams.tv:443/viewmmasr/ch69q2.stream/playlist.m3u8?wmsAuthSign=c2VydmVyX3RpbWU9OS82LzIwMTggOToxOTowMCBQTSZoYXNoX3ZhbHVlPTZ4R0QzNlhNMW5OTTgzaXBseXpsY2c9PSZ2YWxpZG1pbnV0ZXM9MjQwJmlkPXZpZXdtbWFzci0yNDI2NjY =
template = find_between(url, '',
'playlist') + "chunks" # 'https://{0}.smoothstreams.tv:443/{1}/ch{2}q{3}.stream/chunks'
file = file.replace('chunks', template)
with open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'playlist.m3u8'), 'r+') as f:
f.write(file)
return file
else:
# not used currently
template = 'http://{0}.smoothstreams.tv:9100/{1}/ch{2}q{3}.stream/chunks'
file = '#EXTM3U\n#EXTINF:' + file[43:110] + "\n" + url
# with open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'playlist.m3u8'), 'r+') as f:
# f.write(file)
return url
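# Return True only if the URL answers with HTTP 200.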
def checkChannelURL(url):
try:
session = requests.Session()
code = session.get(url)
# code = urllib.request.urlopen(url, timeout=10).getcode()
if code.status_code != 200:
logger.debug("Exception on url %s with code %s" % (url, code.status_code))
return False
return True
except timeout:
logger.debug("Timeout on url %s" % url)
return False
except:
logger.debug("Exception on url %s" % url)
return False
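# Return a working stream URL: try the primary server (quality 1), the spare server, then every quality on both, falling back to a full server search.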
def fixURL(strm, ch, qual, hash):
template = '{0}://{1}.smoothstreams.tv:{2}/{3}/ch{4}q{5}.stream{6}?wmsAuthSign={7}'
urlformatted = template.format('https' if strm == 'hls' else 'rtmp', 'SRVR', '443' if strm == 'hls' else '3625',
SITE, "{:02}".format(int(ch)), 'QUAL', '/playlist.m3u8' if strm == 'hls' else '',
hash)
if checkChannelURL(urlformatted.replace('SRVR', SRVR).replace('QUAL', str(1))):
return urlformatted.replace('SRVR', SRVR).replace('QUAL', str(1))
# Check spare
if checkChannelURL(urlformatted.replace('SRVR', SRVR_SPARE).replace('QUAL', str(qual))):
return urlformatted.replace('SRVR', SRVR_SPARE).replace('QUAL', str(qual))
else:
# Check other qualities
for q in range(1, 4):
if checkChannelURL(urlformatted.replace('SRVR', SRVR).replace('QUAL', str(q))):
return urlformatted.replace('SRVR', SRVR).replace('QUAL', str(q))
elif checkChannelURL(urlformatted.replace('SRVR', SRVR_SPARE).replace('QUAL', str(q))):
return urlformatted.replace('SRVR', SRVR_SPARE).replace('QUAL', str(q))
# oh boy we're in trouble now
return findChannelURL(input_url=urlformatted, qual=qual)
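# Build the final stream URL for a channel: map the requested quality to the stream ids reported by CHANAPI and fill in the STREAM_INFO template for the chosen stream type.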
def createURL(chan, qual, strm, token):
chan = int(chan)
qualOptions = {}
chanData = CHANAPI[str(chan)]
for i in chanData:
if i['id'] == '3':
qualOptions['720'] = 'q' + i['stream']
elif i['id'] == '4':
qualOptions['540'] = 'q' + i['stream']
elif i['id'] == '5':
qualOptions['360'] = 'q' + i['stream']
if int(qual) == 1:
quality = "720" # HD - 2800k
elif int(qual) == 2:
quality = "540" # LD - 1250k
elif int(qual) == 3:
quality = "360" # Mobile - 400k ( Not in settings)
else:
quality = "720"
sanitizedQuality = 'q1'
if quality in qualOptions:
sanitizedQuality = qualOptions[quality]
sanitized_channel = "{:02.0f}".format(int(chan))
URLBASE = '{0}://{1}.smoothstreams.tv:{2}/{3}/ch{4}{5}{6}?wmsAuthSign={7}'
url = URLBASE.format(STREAM_INFO[strm]['domain'],
SRVR,
STREAM_INFO[strm]['port'],
SITE,
sanitized_channel,
sanitizedQuality if STREAM_INFO[strm]['quality'] == 'standard' else '.smil',
STREAM_INFO[strm]['playlist'],
token['hash'])
return url
############################################################
# m3u8 merger
############################################################
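# Read the optional extra m3u8 (URL or file), tag each #EXTINF entry with a group-title and return it for merging into the main playlist.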
def obtain_m3u8():
formatted_m3u8 = ''
url = EXTM3URL
name = EXTM3UNAME
file = EXTM3UFILE
if url != '':
logger.debug("extra m3u8 url")
inputm3u8 = urllib.request.urlopen(url).read().decode('utf-8')
inputm3u8 = inputm3u8.split("\n")[1:]
elif file != '':
logger.debug("extra m3u8 file")
f = open(file, 'r')
inputm3u8 = f.readlines()
inputm3u8 = inputm3u8[1:]
inputm3u8 = [x.strip("\n") for x in inputm3u8]
else:
logger.debug("extra m3u8 nothing")
return formatted_m3u8
for i in range(len(inputm3u8)):
if inputm3u8[i] != "" or inputm3u8[i] != "\n":
try:
if inputm3u8[i].startswith("#"):
grouper = inputm3u8[i]
grouper = grouper.split(',')
grouper = grouper[0] + ' group-title="%s"' % (name) + "," + grouper[1]
if i != 0:
formatted_m3u8 += "\n"
formatted_m3u8 += grouper
else:
formatted_m3u8 += "\n" + inputm3u8[i]
except:
logger.debug("skipped:", inputm3u8[i])
return formatted_m3u8
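# Rebuild cache/combined.xml from the configured external XMLTV sources, at most once every three hours.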
def obtain_epg():
if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'combined.xml')):
existing = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'combined.xml')
cur_utc_hr = datetime.utcnow().replace(microsecond=0, second=0, minute=0).hour
target_utc_hr = (cur_utc_hr // 3) * 3
target_utc_datetime = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=target_utc_hr)
logger.debug("utc time is: %s, utc target time is: %s, file time is: %s" % (
datetime.utcnow(), target_utc_datetime, datetime.utcfromtimestamp(os.stat(existing).st_mtime)))
if os.path.isfile(existing) and os.stat(existing).st_mtime > target_utc_datetime.timestamp():
logger.debug("Skipping download of epg")
return
# clear epg file
f = open('./cache/combined.xml', 'w')
f.write('<?xml version="1.0" encoding="UTF-8"?>'.rstrip('\r\n'))
f.write('''<tv></tv>''')
f.close()
list_of_xmltv = [EXTXMLURL]
for i in list_of_xmltv:
if i != '' and i != 'www.testurl.com/epg.xml':
xmltv_merger(i)
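# Fetch an external XMLTV file (following redirects; gzip or plain), append its channels and programmes to cache/epg.xml's tree and write the result to cache/combined.xml.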
def xmltv_merger(xml_url):
response = requests.get(xml_url)
if response.history:
logger.debug("Request was redirected")
for resp in response.history:
logger.debug("%s %s" % (resp.status_code, resp.url))
logger.debug("Final destination: %s %s" % (response.status_code, response.url))
xml_url = response.url
else:
logger.debug("Request was not redirected")
if xml_url.endswith('.gz'):
urllib.request.urlretrieve(xml_url, './cache/raw.xml.gz')
opened = gzip.open('./cache/raw.xml.gz')
else:
urllib.request.urlretrieve(xml_url, './cache/raw.xml')
opened = open('./cache/raw.xml', encoding="UTF-8")
tree = ET.parse('./cache/epg.xml')
treeroot = tree.getroot()
try:
source = ET.parse(opened)
except:
# Try file as gzip instead
urllib.request.urlretrieve(xml_url, './cache/raw.xml.gz')
opened = gzip.open('./cache/raw.xml.gz')
source = ET.parse(opened)
for channel in source.iter('channel'):
treeroot.append(channel)
for programme in source.iter('programme'):
treeroot.append(programme)
tree.write('./cache/combined.xml')
with open('./cache/combined.xml', 'r+') as f:
content = f.read()
f.seek(0, 0)
f.write('<?xml version="1.0" encoding="UTF-8"?>'.rstrip('\r\n') + content)
return
############################################################
# TVHeadend
############################################################
def build_tvh_playlist():
global chan_map
# build playlist using the data we have
new_playlist = "#EXTM3U\n"
for pos in range(1, len(chan_map) + 1):
try:
# build channel url
template = "{0}/{1}/auto/v{2}"
channel_url = template.format(SERVER_HOST, SERVER_PATH, chan_map[pos].channum)
name = str(pos) + " " + chan_map[pos].channame
# build playlist entry
new_playlist += '#EXTINF:-1 tvg-id="%s" tvh-chnum="%s" tvg-name="%s" tvg-logo="%s/%s/%s.png" channel-id="%s",%s\n' % (
chan_map[pos].channum, chan_map[pos].channum, name, SERVER_HOST, SERVER_PATH, chan_map[pos].channum,
chan_map[pos].channum,
name)
new_playlist += '%s\n' % channel_url
except:
logger.exception("Exception while updating playlist: ")
logger.info("Built TVH playlist")
return new_playlist
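# Fetch the channel grid from the TVHeadend API using basic auth and return its entries.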
def get_tvh_channels():
url = 'http://%s:9981/api/channel/grid?start=0&limit=999999' % TVHURL
try:
r = requests.get(url, auth=requests.auth.HTTPBasicAuth(TVHUSER, TVHPASS)).text
data = json.loads(r)
return (data['entries'])
except:
print('An error occurred')
############################################################
# PLEX Live
############################################################
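# HDHomeRun emulation: discover/lineup/device endpoints so Plex Live TV (and TVHeadend) can treat the proxy as a tuner.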
def discover():
discoverData = {
'FriendlyName': 'SSTVProxy',
'Manufacturer': 'Silicondust',
'ModelNumber': 'HDTC-2US',
'FirmwareName': 'hdhomeruntc_atsc',
'TunerCount': 6,
'FirmwareVersion': '20150826',
'DeviceID': '12345678',
'DeviceAuth': 'test1234',
'BaseURL': SERVER_HOST + "/" + SERVER_PATH,
'LineupURL': '%s/lineup.json' % urljoin(SERVER_HOST, SERVER_PATH)
}
return jsonify(discoverData)
def tvh_discover():
tvhdiscoverData = {
'FriendlyName': 'SSTVProxy',
'Manufacturer': 'Silicondust',
'ModelNumber': 'HDTC-2US',
'FirmwareName': 'hdhomeruntc_atsc',
'TunerCount': 6,
'FirmwareVersion': '20150826',
'DeviceID': '12345678',
'DeviceAuth': 'test1234',
'BaseURL': SERVER_HOST + "/tvh",
'LineupURL': '%s/lineup.json' % (SERVER_HOST + "/tvh")
}
return jsonify(tvhdiscoverData)
def status():
return jsonify({
'ScanInProgress': 0,
'ScanPossible': 1,
'Source': "Cable",
'SourceList': ['Cable']
})
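# m3u8_plex: fold an extra user-supplied M3U into the HDHomeRun lineup. Each #EXTINF
# line creates a new lineup entry (name taken from the text after the comma) and the
# following rtmp/http line fills in its URL, routed through the local /auto pipe.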
def m3u8_plex(lineup, inputm3u8):
for i in range(len(inputm3u8)):
if inputm3u8[i] != "" or inputm3u8[i] != "\n":
try:
if inputm3u8[i].startswith("#"):
grouper = inputm3u8[i]
grouper = grouper.split(',')
name = grouper[1]
lineup.append({'GuideNumber': str(len(lineup) + 1),
'GuideName': name,
'URL': 'empty'
})
elif inputm3u8[i].startswith("rtmp") or inputm3u8[i].startswith("http"):
template = "{0}/{1}/auto/v{2}?url={3}"
url = template.format(SERVER_HOST, SERVER_PATH, str(len(lineup)), inputm3u8[i])
lineup[-1]['URL'] = url
except:
logger.debug("skipped:", inputm3u8[i])
return lineup
def createLineup(chan_map):
lineup = []
for c in range(1, len(chan_map) + 1):
template = "{0}/{1}/auto/v{2}"
url = template.format(SERVER_HOST, SERVER_PATH, chan_map[c].channum)
lineup.append({'GuideNumber': str(chan_map[c].channum),
'GuideName': chan_map[c].channame,
'URL': url
})
formatted_m3u8 = ''
if EXTM3URL != '':
logger.debug("extra m3u8 url")
inputm3u8 = urllib.request.urlopen(EXTM3URL).read().decode('utf-8')
inputm3u8 = inputm3u8.split("\n")[1:]
return jsonify(m3u8_plex(lineup, inputm3u8))
elif EXTM3UFILE != '':
logger.debug("extra m3u8 file")
f = open(EXTM3UFILE, 'r')
inputm3u8 = f.readlines()
inputm3u8 = inputm3u8[1:]
inputm3u8 = [x.strip("\n") for x in inputm3u8]
return jsonify(m3u8_plex(lineup, inputm3u8))
return jsonify(lineup)
def tvh_lineup():
lineup = []
for c in get_tvh_channels():
if c['enabled']:
url = 'http://%s:%s@%s:9981/stream/channel/%s?profile=%s&weight=%s' % (
TVHUSER, TVHPASS, TVHURL, c['uuid'], tvhstreamProfile, int(tvhWeight))
lineup.append({'GuideNumber': str(c['number']),
'GuideName': c['name'],
'URL': url
})
return jsonify(lineup)
def lineup_post():
return ''
def device():
discoverData = {
'FriendlyName': 'SSTVProxy',
'Manufacturer': 'Silicondust',
'ModelNumber': 'HDTC-2US',
'FirmwareName': 'hdhomeruntc_atsc',
'TunerCount': 6,
'FirmwareVersion': '20150826',
'DeviceID': '12345678',
'DeviceAuth': 'test1234',
'BaseURL': SERVER_HOST + "/" + SERVER_PATH,
'LineupURL': '%s/lineup.json' % urljoin(SERVER_HOST, SERVER_PATH)
}
return render_template('device.xml', data=discoverData), {'Content-Type': 'application/xml'}
def tvh_device():
tvhdiscoverData = {
'FriendlyName': 'SSTVProxy',
'Manufacturer': 'Silicondust',
'ModelNumber': 'HDTC-2US',
'FirmwareName': 'hdhomeruntc_atsc',
'TunerCount': 6,
'FirmwareVersion': '20150826',
'DeviceID': '12345678',
'DeviceAuth': 'test1234',
'BaseURL': SERVER_HOST + "/tvh",
'LineupURL': '%s/lineup.json' % (SERVER_HOST + "/tvh")
}
return render_template('device.xml', data=tvhdiscoverData), {'Content-Type': 'application/xml'}
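# ffmpegPipe: spawn ffmpeg to remux the given URL to MPEG-TS on stdout without
# re-encoding, and yield the output to the client in 512-byte chunks. Roughly
# equivalent to running (illustrative command only):
#   ffmpeg -i <url> -vcodec copy -acodec copy -f mpegts pipe:1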
def ffmpegPipe(url):
logger.debug("starting generate function")
cmdline = list()
cmdline.append(FFMPEGLOC)
cmdline.append("-i")
cmdline.append(url)
cmdline.append("-vcodec")
cmdline.append("copy")
cmdline.append("-acodec")
cmdline.append("copy")
cmdline.append("-f")
cmdline.append("mpegts")
cmdline.append("pipe:1")
logger.debug(cmdline)
FNULL = open(os.devnull, 'w')
proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=FNULL)
logger.debug("pipe started")
try:
f = proc.stdout
byte = f.read(512)
while byte:
yield byte
byte = f.read(512)
finally:
proc.kill()
############################################################
# Kodi
############################################################
def build_kodi_playlist():
# kodi playlist contains two copies of each channel: the first is dynamic HLS and the second is static rtmp
global chan_map
# build playlist using the data we have
new_playlist = "#EXTM3U x-tvg-url='%s/epg.xml'\n" % urljoin(SERVER_HOST, SERVER_PATH)
for pos in range(1, len(chan_map) + 1):
try:
# build channel url
url = "{0}/playlist.m3u8?ch={1}&strm={2}&qual={3}"
rtmpTemplate = 'rtmp://{0}.smoothstreams.tv:3625/{1}/ch{2}q{3}.stream?wmsAuthSign={4}'
urlformatted = url.format(SERVER_PATH, chan_map[pos].channum, 'hls', QUAL)
channel_url = urljoin(SERVER_HOST, urlformatted)
# build playlist entry
new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="%s" tvg-logo="%s/%s/%s.png" channel-id="%s" group-title="Dynamic",%s\n' % (
chan_map[pos].channum, chan_map[pos].channame, SERVER_HOST, SERVER_PATH, chan_map[pos].channum,
chan_map[pos].channum,
chan_map[pos].channame)
new_playlist += '%s\n' % channel_url
new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="%s" tvg-logo="%s/%s/%s.png" channel-id="%s" group-title="Static RTMP",%s\n' % (
chan_map[pos].channum, chan_map[pos].channame, SERVER_HOST, SERVER_PATH, chan_map[pos].channum,
chan_map[pos].channum,
chan_map[pos].channame)
new_playlist += '%s\n' % createURL(pos, QUAL, 'rtmp',
token) # rtmpTemplate.format(SRVR, SITE, "{:02}".format(pos), QUAL if pos <= QUALLIMIT else '1',token['hash'])
prog = getProgram(pos)
if prog.title != 'none':
new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="%s" tvg-logo="%s/%s/%s.png" channel-id="%s" group-title="LIVE",%s\n' % (
chan_map[pos].channum, chan_map[pos].channame, SERVER_HOST, SERVER_PATH, chan_map[pos].channum,
chan_map[pos].channum,
prog.title)
new_playlist += '%s\n' % channel_url
except:
logger.exception("Exception while updating kodi playlist on channel #%s." % pos)
new_playlist += '#EXTINF:-1 tvg-id="static_refresh" tvg-name="Static Refresh" tvg-logo="%s/%s/empty.png" channel-id="0" group-title="Static RTMP",Static Refresh\n' % (
SERVER_HOST, SERVER_PATH)
new_playlist += '%s/%s/refresh.m3u8\n' % (SERVER_HOST, SERVER_PATH)
logger.info("Built Kodi playlist")
# if ADDONPATH and os.path.isdir(ADDONPATH):
# #lazy install, low priority tbh
# tree = ET.parse(os.path.join(ADDONPATH, 'settings.xml'))
# root = tree.getroot()
# for child in root:
# if child.attrib['id'] == 'epgUrl':
# child.attrib['value'] = '%s/%s/epg.xml' % (SERVER_HOST, SERVER_PATH)
# elif child.attrib['id'] == 'm3uUrl':
# child.attrib['value'] = '%s/%s/kodi.m3u8' % (SERVER_HOST, SERVER_PATH)
# elif child.attrib['id'] == 'epgPathType':
# child.attrib['value'] = '1'
# elif child.attrib['id'] == 'm3uPathType':
# child.attrib['value'] = '1'
# tree.write(os.path.join(ADDONPATH, 'settings.xml'))
return new_playlist
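# rescan_channels: force Kodi's PVR IPTV Simple Client to reload its playlist by
# toggling the addon off and back on via Kodi's JSON-RPC API (hence the two identical
# requests). Equivalent call with curl, using placeholder credentials and host:
#   curl -u user:pass -H 'Content-Type: application/json' \
#        -d '{"jsonrpc":"2.0","method":"Addons.SetAddonEnabled","params":{"addonid":"pvr.iptvsimple","enabled":"toggle"},"id":1}' \
#        http://<kodi-ip>:<kodiport>/jsonrpc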
def rescan_channels():
credentials = str.encode(KODIUSER + ':' + KODIPASS)
encoded_credentials = base64.b64encode(credentials)
authorization = b'Basic ' + encoded_credentials
apiheaders = {'Content-Type': 'application/json', 'Authorization': authorization}
apidata = {"jsonrpc": "2.0", "method": "Addons.SetAddonEnabled",
"params": {"addonid": "pvr.iptvsimple", "enabled": "toggle"}, "id": 1}
apiurl = 'http://%s:%s/jsonrpc' % (request.environ.get('REMOTE_ADDR'), KODIPORT)
json_data = json.dumps(apidata)
post_data = json_data.encode('utf-8')
apirequest = urllib.request.Request(apiurl, post_data, apiheaders)
# has to happen twice to toggle off then back on
result = urllib.request.urlopen(apirequest)
result = urllib.request.urlopen(apirequest)
logger.info("Forcing Kodi to rescan, result:%s " % result.read())
############################################################
# Html
############################################################
# Change this to change the style of the web page generated
style = """
<style type="text/css">
body { background: white url("https://guide.smoothstreams.tv/assets/images/channels/150.png") no-repeat fixed center center; background-size: 500px 500px; color: black; }
h1 { color: white; background-color: black; padding: 0.5ex }
h2 { color: white; background-color: black; padding: 0.3ex }
.container {display: table; width: 100%;}
.left-half {position: absolute; left: 0px; width: 50%;}
.right-half {position: absolute; right: 0px; width: 50%;}
</style>
"""
def create_menu():
footer = '<p>Donations: PayPal to [email protected] or BTC - 19qvdk7JYgFruie73jE4VvW7ZJBv8uGtFb</p>'
with open("./cache/settings.html", "w") as html:
html.write("""<html>
<head>
<meta charset="UTF-8">
%s
<title>YAP</title>
</head>
<body>\n""" % (style,))
html.write('<section class="container"><div class="left-half">')
html.write("<h1>YAP Settings</h1>")
template = "<a href='{1}/{2}/{0}.html'>{3}</a>"
html.write(
"<p>" + template.format("settings", SERVER_HOST, SERVER_PATH, "Options") + " " + template.format("howto",
SERVER_HOST,
SERVER_PATH,
"Instructions") + " " + template.format(
"channels", SERVER_HOST, SERVER_PATH, "Channels List") + " " + template.format("adv_settings",
SERVER_HOST, SERVER_PATH,
"Advanced Settings") + " " + template.format("paths",
SERVER_HOST, SERVER_PATH,
"Proxy Paths") + "</p>")
html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
channelmap = {}
chanindex = 0
list = ["Username", "Password", "Quality", "Stream", "Server", "Service", "IP", "Port",
"ExternalIP", "ExternalPort"]
html.write('<table width="300" border="2">')
for setting in list:
if setting.lower() == 'service':
html.write('<tr><td>Service:</td><td><select name="Service" size="1">')
for option in providerList:
html.write('<option value="%s"%s>%s</option>' % (
option[0], ' selected' if SITE == option[1] else "", option[0]))
html.write('</select></td></tr>')
elif setting.lower() == 'server':
html.write('<tr><td>Server:</td><td><select name="Server" size="1">')
for option in serverList:
html.write('<option value="%s"%s>%s</option>' % (
option[0], ' selected' if SRVR == option[1] else "", option[0]))
html.write('</select></td></tr>')
elif setting.lower() == 'stream':
html.write('<tr><td>Stream:</td><td><select name="Stream" size="1">')
for option in streamtype:
html.write(
'<option value="%s"%s>%s</option>' % (option, ' selected' if STRM == option else "", option))
html.write('</select></td></tr>')
elif setting.lower() == 'quality':
html.write('<tr><td>Quality:</td><td><select name="Quality" size="1">')
for option in qualityList:
html.write('<option value="%s"%s>%s</option>' % (
option[0], ' selected' if QUAL == option[1] else "", option[0]))
html.write('</select></td></tr>')
elif setting.lower() == 'password':
html.write('<tr><td>%s:</td><td><input name="%s" type="Password" value="%s"></td></tr>' % (
setting, setting, PASS))
else:
val = "Unknown"
if setting == "Username":
val = USER
elif setting == "IP":
val = LISTEN_IP
elif setting == "Port":
val = LISTEN_PORT
elif setting == "ExternalIP":
val = EXTIP
elif setting == "ExternalPort":
val = EXTPORT
html.write(
'<tr><td>%s:</td><td><input name="%s" type="text" value="%s"></td></tr>' % (setting, setting, val))
html.write('</table>')
html.write('<input type="submit" value="Submit">')
html.write('</form>')
html.write("<p>You are running version (%s %s), the latest is %s</p>" % (type, __version__, latest_ver))
html.write("</br><p>Restarts can take a while, it is not immediate.</p>")
html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
html.write('<input type="hidden" name="restart" value="1">')
html.write('<input type="submit" value="Restart">')
html.write('</form>')
html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
html.write('<input type="hidden" name="restart" value="2">')
html.write('<input type="submit" value="Update + Restart">')
html.write('</form>')
html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
html.write('<input type="hidden" name="restart" value="3">')
devname = latestfile.replace('master', 'dev')
html.write('<input type="submit" value="Update(Dev Branch) + Restart">')
html.write('</form>')
html.write("<p><a href='%s'>Manual Download Master link</a></p>" % latestfile)
html.write("<p><a href='%s'>Manual Download Dev link</a></p>" % devname)
# html.write('<p> </p>')
# html.write('<p> </p>')
html.write('<p> </p>')
html.write('<p> </p>')
html.write(footer)
html.write('</div><div class="right-half"><h1>YAP Outputs</h1>')
html.write("<table><tr><td rowspan='2'>Standard Outputs</td><td>m3u8 - %s/playlist.m3u8</td></tr>" % urljoin(
SERVER_HOST, SERVER_PATH))
html.write("<tr><td>EPG - %s/epg.xml</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<tr><td> </td><td> </td></tr>")
html.write(
"<tr><td>Sports Playlist</td><td>%s/sports.m3u8</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write(
"<tr><td>Sports EPG (Alternative)</td><td>%s/sports.xml</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<tr><td> </td><td> </td></tr>")
html.write(
"<tr><td>Kodi RTMP supported</td><td>m3u8 - %s/kodi.m3u8</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<tr><td> </td><td> </td></tr>")
html.write("<tr><td rowspan='2'>Plex Live<sup>1</sup></td><td>Tuner - %s</td></tr>" % urljoin(SERVER_HOST,
SERVER_PATH))
html.write("<tr><td>EPG - %s/epg.xml</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<tr><td> </td><td> </td></tr>")
html.write("<tr><td>TVHeadend<sup>1</sup></td><td>%s/tvh.m3u8</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<tr><td> </td><td> </td></tr>")
html.write(
"<tr><td rowspan='2'>Remote Internet access<sup>2</sup></td><td>m3u8 - %s/external.m3u8</td></tr>" % urljoin(
EXT_HOST, SERVER_PATH))
html.write("<tr><td>EPG - %s/epg.xml</td></tr>" % urljoin(EXT_HOST, SERVER_PATH))
html.write("<tr><td> </td><td> </td></tr>")
html.write(
"<tr><td rowspan='2'>Combined Outputs<sup>2</sup></td><td>m3u8 - %s/combined.m3u8</td></tr>" % urljoin(
SERVER_HOST, SERVER_PATH))
html.write("<tr><td>epg - %s/combined.xml</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<tr><td> </td><td> </td></tr>")
html.write(
"<tr><td>Static Playlist</td><td>m3u8 - %s/static.m3u8</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<tr><td> </td><td> </td></tr>")
html.write(
"<tr><td rowspan='2'>TVHProxy<sup>3</sup></td><td>Tuner - %s</td></tr>" % urljoin(SERVER_HOST, 'tvh'))
html.write("<tr><td>EPG - http://%s:9981/xmltv/channels</td></tr>" % TVHURL)
html.write("<tr><td> </td><td> </td></tr>")
html.write("<tr><td>Test Playlist for troubleshooting</td><td>%s/test.m3u8</td></tr>" % urljoin(SERVER_HOST,
SERVER_PATH))
html.write("<tr><td> </td><td> </td></tr>")
html.write(
"<tr><td>Dynamic xspf, includes currently showing programs</td><td>%s/playlist.xspf</td></tr>" % urljoin(
SERVER_HOST,
SERVER_PATH))
html.write("<tr><td>Static xspf</td><td>%s/static.xspf</td></tr>" % urljoin(SERVER_HOST,
SERVER_PATH))
html.write("<tr><td> </td><td> </td></tr>")
html.write("<tr><td>Note 1:</td><td>Requires FFMPEG installation and setup</td></tr>")
html.write("<tr><td>Note 2:</td><td>Requires External IP and port in advancedsettings</td></tr>")
html.write("<tr><td>Note 3:</td><td>Requires TVH proxy setup in advancedsettings</td></tr></table>")
html.write("</div></section></body></html>\n")
with open("./cache/adv_settings.html", "w") as html:
html.write("""<html>
<head>
<meta charset="UTF-8">
%s
<title>YAP</title>
</head>
<body>\n""" % (style,))
html.write('<section class="container"><div class="left-half">')
html.write("<h1>YAP Settings</h1>")
template = "<a href='{1}/{2}/{0}.html'>{3}</a>"
html.write(
"<p>" + template.format("settings", SERVER_HOST, SERVER_PATH, "Options") + " " + template.format("howto",
SERVER_HOST,
SERVER_PATH,
"Instructions") + " " + template.format(
"channels", SERVER_HOST, SERVER_PATH, "Channels List") + " " + template.format("adv_settings",
SERVER_HOST, SERVER_PATH,
"Advanced Settings") + " " + template.format("paths",
SERVER_HOST, SERVER_PATH,
"Proxy Paths") + "</p>")
html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
channelmap = {}
chanindex = 0
adv_set = ["kodiuser", "kodipass", "ffmpegloc", "kodiport", "extram3u8url", "extram3u8name", "extram3u8file",
"extraxmlurl", "tvhredirect", "tvhaddress", "tvhuser", "tvhpass", "overridexml", "checkchannel",
"pipe"]
html.write('<table width="300" border="2">')
for setting in adv_set:
if setting.lower() == 'kodipass':
html.write('<tr><td>%s:</td><td><input name="%s" type="Password" value="%s"></td></tr>' % (
setting, setting, KODIPASS))
elif setting == "checkchannel":
html.write(
'<tr><td>%s:</td><td><select name="%s" size="1"><option value="True" %s>Enabled</option><option value="False" %s>Disabled</option></select></td></tr>' % (
setting, setting, ' selected' if CHECK_CHANNEL == True else "",
' selected' if CHECK_CHANNEL == False else ""))
elif setting == "pipe":
html.write(
'<tr><td>%s:</td><td><select name="%s" size="1"><option value="True" %s>Enabled</option><option value="False" %s>Disabled</option></select></td></tr>' % (
setting, setting, ' selected' if PIPE == True else "", ' selected' if PIPE == False else ""))
else:
val = "Unknown"
if setting == "kodiuser":
val = KODIUSER
elif setting == "kodiport":
val = KODIPORT
elif setting == "ffmpegloc":
val = FFMPEGLOC
elif setting == "extram3u8url":
val = EXTM3URL
elif setting == "extram3u8file":
val = EXTM3UFILE
elif setting == "extram3u8name":
val = EXTM3UNAME
elif setting == "extraxmlurl":
val = EXTXMLURL
elif setting == "tvhredirect":
val = TVHREDIRECT
elif setting == "tvhaddress":
val = TVHURL
elif setting == "tvhuser":
val = TVHUSER
elif setting == "tvhpass":
val = TVHPASS
elif setting == "overridexml":
val = OVRXML
if not (setting == "ffmpegloc" and not platform.system() == 'Windows'):
html.write('<tr><td>%s:</td><td><input name="%s" type="text" value="%s"></td></tr>' % (
setting, setting, val))
html.write('</table>')
html.write('<input type="submit" value="Submit">')
html.write('</form>')
html.write("<p>You are running version (%s %s), the latest is %s</p>" % (type, __version__, latest_ver))
html.write("</br><p>Restarts can take a while, it is not immediate.</p>")
html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
html.write('<input type="hidden" name="restart" value="1">')
html.write('<input type="submit" value="Restart">')
html.write('</form>')
html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
html.write('<input type="hidden" name="restart" value="2">')
html.write('<input type="submit" value="Update + Restart">')
html.write('</form>')
html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
html.write('<input type="hidden" name="restart" value="3">')
html.write('<input type="submit" value="Update(Dev Branch) + Restart">')
html.write('</form>')
html.write('<p> </p>')
html.write('<p> </p>')
html.write('<p> </p>')
html.write('<p> </p>')
html.write(footer)
html.write("</div></section></body></html>\n")
with open("./cache/channels.html", "w") as html:
global chan_map
html.write("""<html><head><title>YAP</title><meta charset="UTF-8">%s</head><body>\n""" % (style,))
html.write("<h1>Channel List and Upcoming Shows</h1>")
template = "<a href='{1}/{2}/{0}.html'>{3}</a>"
html.write(
"<p>" + template.format("settings", SERVER_HOST, SERVER_PATH, "Options") + " " + template.format("howto",
SERVER_HOST,
SERVER_PATH,
"Instructions") + " " + template.format(
"channels", SERVER_HOST, SERVER_PATH, "Channels List") + " " + template.format("adv_settings",
SERVER_HOST, SERVER_PATH,
"Advanced Settings") + " " + template.format("paths",
SERVER_HOST, SERVER_PATH,
"Proxy Paths") + "</p>")
html.write("<a href='https://guide.smoothstreams.tv/'>Click here to go to the SmoothStreams Official Guide</a>")
html.write('<section class="container"><div class="left-half"><table width="300" border="1">')
template = "<td>{0}</td><td><a href='{2}/{3}/playlist.m3u8?ch={0}'><img src='{2}/{3}/{0}.png'></a></td></td>"
for i in chan_map:
if i % 5 == 1:
html.write("<tr>")
html.write(template.format(chan_map[i].channum, chan_map[i].channame, SERVER_HOST, SERVER_PATH))
if i % 5 == 0:
html.write("</tr>")
html.write("</table>")
html.write("</br>%s</div>" % footer)
html.write('<div class="right-half"><h3>Coming up</h3>')
template = "{0} - <a href='{2}/{3}/playlist.m3u8?ch={0}'>{1}</a></br>"
for i in chan_map:
prog = getProgram(i)
if prog.title != 'none':
try:
html.write(
template.format(chan_map[i].channum, str(prog.title).encode('utf-8'), SERVER_HOST, SERVER_PATH))
except:
logger.exception(prog.title)
html.write("</div></section>")
html.write("</body></html>\n")
with open("./cache/index.html", "w") as html:
html.write("""<html><head><title>YAP</title><meta charset="UTF-8">%s</head><body>\n""" % (style,))
template = "<h2><a href='{1}/{2}/{0}.html'>{3}</a></h2>"
html.write("<h1>Welcome to YAP!</h1>")
html.write(template.format("settings", SERVER_HOST, SERVER_PATH, "Options"))
html.write(template.format("howto", SERVER_HOST, SERVER_PATH, "Instructions"))
html.write(template.format("channels", SERVER_HOST, SERVER_PATH, "Channels List"))
html.write(template.format("adv_settings", SERVER_HOST, SERVER_PATH, "Advanced Settings"))
html.write(template.format("paths", SERVER_HOST, SERVER_PATH, "Proxy Paths"))
html.write(footer)
html.write("</body></html>\n")
with open("./cache/howto.html", "w") as html:
html.write("""<html><head><title>YAP</title><meta charset="UTF-8">%s</head><body>\n""" % (style,))
template = "<a href='{1}/{2}/{0}.html'>{3}</a>"
html.write("<h1>Welcome to YAP!</h1>")
html.write(
"<p>" + template.format("settings", SERVER_HOST, SERVER_PATH, "Options") + " " + template.format("howto",
SERVER_HOST,
SERVER_PATH,
"Instructions") + " " + template.format(
"channels", SERVER_HOST, SERVER_PATH, "Channels List") + " " + template.format("adv_settings",
SERVER_HOST, SERVER_PATH,
"Advanced Settings") + "</p>")
html.write("<h2>Work in progress.</h2>")
html.write("""<h2>Commandline Arguments</h2></br><p>'install' - forces recreation of the install function which creates certain files, such as the tvh internal grabber</br></br>
'headless' - uses command line for initial setup rather than gui</br></br>
'tvh' - each call to a piped channel will return channel 01 which is a 24/7 channel so will always generate a positive result, this allows TVH to create all services</p></br>""")
html.write(
"<h2><a href='https://seo-michael.co.uk/how-to-setup-livetv-pvr-simple-xbmc-kodi/'>Kodi Setup</a></h2>")
html.write("<p>Use this information to populate the settings:</p>")
html.write("<p>m3u8 - %s/kodi.m3u8</p>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<p>EPG - %s/epg.xml</p>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write(
'''<p>RTMP is an issue, so there's a special playlist for it (kodi.m3u8) that has two copies of every channel, one rtmp and one hls. In Kodi's TV section, use the left-hand menu and select group or filter, then choose Dynamic (forced hls) or Static RTMP. For the static_refresh channel (151), don't use it on the guide page, use it on the channel list page, otherwise Kodi will crash. It will lock Kodi for about 20 seconds but refreshes the playlist.</p>''')
html.write("<h2>Ensure you can get YAP working in Kodi or VLC first before attmepting Plex or TVHeadend!</h2>")
html.write("<h2><a href='https://imgur.com/a/OZkN0'>Plex Setup</a></h2>")
html.write("<p></p>")
html.write("<h2>TVHeadend Setup</h2>")
html.write("""<p>
In a nutshell here is how to do it on Ubuntu.</br>Replace USERNAME with your linux user:</br>
<b>1 Download the latest sstvProxy binary (exe) from:</b></br>
http://smoothstreams.tv/board/index.php?topic=1832.0</br>
Save it to:</br>
<blockquote><i>/home/USERNAME/Desktop/sstv</i></blockquote></br>
</br>
<b>2 Delete proxysettings.json</b> (only if you're coming from an older version of sstvproxy)</br>
<blockquote><i>sudo rm /home/USERNAME/Desktop/sstv/proxysettings.json</i></blockquote></br>
</br>
<b>3 Install ffmpeg:</b></br>
<blockquote><i>sudo apt install ffmpeg jq</i></blockquote></br>
</br>
<b>4 Install tvheadend:</b></br>
<blockquote><i>sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 379CE192D401AB61 </i></blockquote></br>
<blockquote><i>echo "deb https://dl.bintray.com/tvheadend/deb xenial release-4.2" | sudo tee -a /etc/apt/sources.list</i></blockquote></br>
<blockquote><i>sudo apt-get update</i></blockquote></br>
<blockquote><i>sudo apt-get install tvheadend</i></blockquote></br>
You will need to enter a username and password to manage tvheadend as part of this install process.</br>
Check for the presence of /usr/bin/tv_find_grabbers. If it doesn't exist, then run:</br>
<blockquote><i>"apt-get install xmltv-util" </i></blockquote></br>
</br>
<b>5 Run sstvProxy:</b></br>
<blockquote><i>sudo chmod +x /home/USERNAME/Desktop/sstv/sstvProxy </i></blockquote></br>
<blockquote><i>sudo /home/USERNAME/Desktop/sstv/sstvProxy tvh</i></blockquote> <i>note the 'tvh' switch will enable it to scan all 150 channels</i></br>
Go through the setup steps, this will also setup the internal EPG grabber for TVHeadend</br>
</br>
<b>6 Restart TVHeadend:</b></br>
<blockquote><i>systemctl stop tvheadend </i></blockquote></br>
<blockquote><i>systemctl start tvheadend </i></blockquote></br>
</br>
<b>7 Configure TVHeadend:</b></br>
On your Ubuntu server browse <blockquote><i>http://127.0.0.1:9981</i></blockquote></br>
Use the username and password you set in Step 4</br>
</br>
Configuration -> Channel / EPG -> EPG Grabber Modules</br>
On the left side, highlight 'Internal: XMLTV: SmoothstreamsTV'</br>
On the right side, tick 'Enabled'</br>
Click 'Save'</br>
Configuration -> DVB Inputs -> Networks</br>
Click 'Add'</br>
Type = IPTV Automatic Network</br>
Network Name = SmoothstreamsTV</br>
URL = http://127.0.0.1:99/sstv/tvh.m3u8</br>
Maximum # input streams = 3</br>
Click Create</br>
Click Force Scan if it doesn't start scanning for muxes - wait for all the muxes to be scanned - there are 150 channels</br>
Go to the 'Services' tab</br>
Map Services -> Map all services</br>
</br>
Configuration -> Channel / EPG -> EPG Grabber Modules</br>
Click the button labeled 'Re-run Internal EPG Grabbers'</br>
**This will take a while to process** View the log down the bottom of the page. After it has run you should now see the channels in the EPG.</br>
<b>8 Restart sstvProxy:</b></br>
<blockquote><i>sudo /home/USERNAME/Desktop/sstv/sstvProxy</i></blockquote> <i>note no 'tvh' switch this time</i></p>""")
html.write("<h2>Advanced Settings</h2>")
html.write("""<p>
You can have as many or as few as you want and the file itself is optional. If you don't care for the option then don't even include it in the file, just delete it.</br></br>
There now exists an advanced settings example file on git. If this is in the same folder as the proxy it will detect it on launch and parse any settings that are within. </br></br>
Currently the accepted settings are:</br>
Custom ffmpeg location "ffmpegloc":"C:\\ffmpeg\\bin\\ffmpeg.exe" (note the double slashes)</br>
Custom kodi control username "kodiuser":"string"</br>
Custom kodi control password "kodipass":"string"</br>
</br>
If you want to output a playlist that combines the SSTV channels with another playlist you already have then these options are for you:</br>
A url source for the above "extram3u8url":"url/string"</br>
A group name for the above, in order to filter between them in client "extram3u8name":"string"</br>
A file source for the above, url has priority though "extram3u8file":"path/string"</br>
</br>
If you want to output an EPG that combines the SSTV channels with another EPG you already have then:</br>
A url source for the above "extraxmlurl":"url/string"</br>
</br>
If you wish to feed YAP into TVH and then TVH into Plex, use the below:</br>
TVH url you use "tvhaddress": "127.0.0.1"</br>
username "tvhuser": ""</br>
password "tvhpass": ""</br>
</br>
If you want to override the EPG with your own one then:</br>
A url source for the epg "overridexml":"url/string"</p>""")
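# Illustrative advancedsettings.json matching the description written above; the keys
# come from that text, the values are placeholders only:
# {
#   "ffmpegloc": "C:\\ffmpeg\\bin\\ffmpeg.exe",
#   "kodiuser": "kodi", "kodipass": "secret",
#   "extram3u8url": "http://example.com/extra.m3u8",
#   "extram3u8name": "Extras", "extram3u8file": "/path/to/extra.m3u8",
#   "extraxmlurl": "http://example.com/extra.xml",
#   "tvhaddress": "127.0.0.1", "tvhuser": "", "tvhpass": "",
#   "overridexml": "http://example.com/epg.xml"
# }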
html.write(footer)
html.write("</body></html>\n")
with open("./cache/paths.html", "w") as html:
html.write("""<html><head><title>YAP</title><meta charset="UTF-8">%s</head><body>\n""" % (style,))
template = "<a href='{1}/{2}/{0}.html'>{3}</a>"
html.write("<h1>Welcome to YAP!</h1>")
html.write(
"<p>" + template.format("settings", SERVER_HOST, SERVER_PATH, "Options") + " " + template.format("howto",
SERVER_HOST,
SERVER_PATH,
"Instructions") + " " + template.format(
"channels", SERVER_HOST, SERVER_PATH, "Channels List") + " " + template.format("adv_settings",
SERVER_HOST, SERVER_PATH,
"Advanced Settings") + " " + template.format("paths",
SERVER_HOST, SERVER_PATH,
"Proxy Paths") + "</p>")
html.write("<h2>Work in progress.</h2>")
html.write("<h2>Proxy URL Paths</h2>")
html.write("""<p>m3u8 playlists can be called using http://ip:port/sstv/playlist.m3u8 optional arguments include 'strm' to overide defaults</p>
<p>alternatively using http://ip:port/sstv/{strm}.m3u8 strm options are 'hls', 'hlsa', 'rtmp', 'mpegts', 'rtsp', 'dash', 'wss'</p>
<p>a specific mpeg playlsit can also be accesed via http://ip:port/sstv/mpeg.2ts</p>
<p>single channels can be called using http://ip:port/sstv/playlist.m3u8?ch=# or http://ip:port/sstv/ch# optional arguments 'strm','qual' to overide defaults</p>
<p>kodi m3u8 url is http://ip:port/sstv/kodi.m3u8</p>
<p>sports m3u8 url is http://ip:port/sstv/sports.m3u8</p>
<p>EPG url is http://ip:port/sstv/epg.xml</p>
<p>Sports EPG url is http://ip:port/sstv/sports.xml</p>
<p>Plex Live TV url is http://ip:port/sstv</p>
<p>TVHeadend Proxy can be used using http://ip:port/sstv/tvh.m3u8</p>
<p>External m3u8 url is http://externalip:externalport/sstv/external.m3u8</p>
<p>Combined m3u8 url is http://ip:port/sstv/combined.m3u8</p>
<p>Combined EPG url is http://ip:port/sstv/combined.xml</p>
<p>Static m3u8 url is http://ip:port/sstv/static.m3u8 optional arguments include 'strm' to override defaults</p>
<p>TVH's own EPG url is http://127.0.0.1:9981/xmltv/channels</p>
<p>Static XSPF url is http://ip:port/sstv/static.xspf</p>
<p>Dynamic XSPF url is http://ip:port/sstv/playlist.xspf</p>
<p></p>
<p></p>
<p></p>
<p></p>
<p></p>
<p></p>
<p></p>
<p></p>
<p></p>
""")
html.write(footer)
html.write("</body></html>\n")
def close_menu(restart):
with open("./cache/close.html", "w") as html:
html.write("""<html><head><title>YAP</title><meta charset="UTF-8">%s</head><body>\n""" % (style,))
html.write("<h1>Data Saved</h1>")
if restart:
html.write("<h1>You have change either the IP or Port, please restart this program.</h1>")
else:
html.write("<p>m3u8 url is %s/playlist.m3u8</p>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<p>kodi m3u8 url is %s/kodi.m3u8</p>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<p>EPG url is %s/epg.xml</p>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<p>Sports EPG url is %s/sports.xml</p>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<p>Plex Live TV url is %s</p>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<p>TVHeadend network url is %s/tvh.m3u8</p>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<p>External m3u8 url is %s/external.m3u8</p>" % urljoin(EXT_HOST, SERVER_PATH))
html.write("<p>Combined m3u8 url is %s/combined.m3u8</p>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<p>Combined epg url is %s/combined.xml</p>" % urljoin(SERVER_HOST, SERVER_PATH))
html.write("<p>Static m3u8 url is %s/static.m3u8</p>" % urljoin(SERVER_HOST, SERVER_PATH))
if TVHREDIRECT == True:
html.write("<p>TVH's own EPG url is http://%s:9981/xmltv/channels</p>" % TVHURL)
html.write("</body></html>\n")
def restart_program():
os.system('cls' if os.name == 'nt' else 'clear')
args = sys.argv[:]
logger.info("YAP is restarting...")
FULL_PATH = sys.argv[0]
exe = sys.executable
args = [exe, FULL_PATH]
args += sys.argv[1:]
# Separate out logger so we can shutdown logger after
logger.info('Restarting YAP with %s', args)
# os.execv fails with spaced names on Windows
# https://bugs.python.org/issue19066
if os.name == 'nt':
subprocess.Popen(args, cwd=os.getcwd())
else:
os.execv(exe, args)
os._exit(0)
############################################################
# CLIENT <-> SSTV BRIDGE
############################################################
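# Flask routes: handle_data() saves settings posted from the HTML pages, the
# /sstv/<file> bridge serves playlists, EPGs, icons and per-channel redirects,
# /tvh/<file> mirrors the HDHomeRun endpoints for a TVHeadend back end, and
# /sstv/auto/<file> returns piped or redirected streams for Plex/TVH.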
@app.route('/sstv/handle_data', methods=['POST'])
def handle_data():
request_page = request.referrer
config = {}
inc_data = request.form
if 'restart' in inc_data:
if inc_data["restart"] == '3':
logger.info('Updating YAP Dev')
newfilename = ntpath.basename(latestfile)
devname = latestfile.replace('master', 'dev')
urllib.request.urlretrieve(devname, os.path.join(os.path.dirname(sys.argv[0]), newfilename))
elif inc_data["restart"] == '2':
logger.info('Updating YAP')
newfilename = ntpath.basename(latestfile)
urllib.request.urlretrieve(latestfile, os.path.join(os.path.dirname(sys.argv[0]), newfilename))
logger.info('Restarting YAP')
restart_program()
return
if request_page.endswith("adv_settings.html"):
logger.info("Received new adv settings from %s", request.environ.get('REMOTE_ADDR'))
restartrequired = False
with open('./advancedsettings.json', 'w') as fp:
dump(inc_data, fp)
adv_settings()
logger.info("Updated adv Settings file.")
else:
logger.info("Received new settings from %s", request.environ.get('REMOTE_ADDR'))
global playlist, kodiplaylist, QUAL, QUALLIMIT, USER, PASS, SRVR, SITE, STRM, LISTEN_IP, LISTEN_PORT, EXTIP, EXT_HOST, SERVER_HOST, EXTPORT
config["username"] = inc_data['Username']
config["password"] = inc_data['Password']
config["stream"] = inc_data['Stream']
for sub in serverList:
if sub[0] == inc_data['Server']:
config["server"] = sub[1]
for sub in providerList:
if sub[0] == inc_data['Service']:
config["service"] = sub[1]
for sub in qualityList:
if sub[0] == inc_data['Quality']:
config["quality"] = sub[1]
config["ip"] = inc_data['IP']
config["port"] = int(inc_data['Port'])
config["externalip"] = inc_data['ExternalIP']
config["externalport"] = inc_data['ExternalPort']
QUAL = config["quality"]
USER = config["username"]
PASS = config["password"]
SRVR = config["server"]
SITE = config["service"]
STRM = config["stream"]
if LISTEN_IP != config["ip"] or LISTEN_PORT != config["port"]:
restartrequired = True
else:
restartrequired = False
LISTEN_IP = config["ip"]
LISTEN_PORT = config["port"]
EXTIP = config["externalip"]
EXTPORT = config["externalport"]
EXT_HOST = "http://" + EXTIP + ":" + str(EXTPORT)
SERVER_HOST = "http://" + LISTEN_IP + ":" + str(LISTEN_PORT)
with open('./proxysettings.json', 'w') as fp:
dump(config, fp)
logger.info("Updated Settings file.")
check_token()
playlist = build_playlist(SERVER_HOST)
kodiplaylist = build_kodi_playlist()
if restartrequired:
logger.info("You have changed either the IP or Port, please restart this program.")
close_menu(True)
else:
close_menu(False)
return redirect(request_page, code=302)
# return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'close.html')
@app.route('/')
@app.route('/sstv')
def landing_page():
logger.info("Index was requested by %s", request.environ.get('REMOTE_ADDR'))
create_menu()
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'index.html')
@app.route('/<request_file>')
def index(request_file):
logger.info("%s requested by %s at root" % (request_file, request.environ.get('REMOTE_ADDR')))
if request_file.lower() == 'lineup_status.json':
return status()
elif request_file.lower() == 'discover.json':
return discover()
elif request_file.lower() == 'lineup.json':
return createLineup(chan_map)
elif request_file.lower() == 'lineup.post':
return lineup_post()
# logger.debug(request.headers)
elif request_file.lower() == 'device.xml':
return device()
elif request_file.lower() == 'favicon.ico':
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'empty.png')
@app.route('/%s/<request_file>' % SERVER_PATH)
def bridge(request_file):
global playlist, token, chan_map, kodiplaylist, tvhplaylist, FALLBACK
check_token()
try:
client = find_client(request.headers['User-Agent'])
logger.debug("Client is %s, user agent is %s" % (client, request.headers['User-Agent']))
except:
logger.debug("No user-agent provided by %s", request.environ.get('REMOTE_ADDR'))
client = 'unk'
if request_file.lower() == ('version'):
resp = {'version': __version__, 'type': type}
return jsonify(resp)
if request_file.lower().endswith('.xspf'):
playlist = build_xspf(SERVER_HOST, request_file)
logger.info("XSPF playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
return Response(playlist, mimetype='application/xspf+xml')
# return epg
if request_file.lower().startswith('epg.'):
logger.info("EPG was requested by %s", request.environ.get('REMOTE_ADDR'))
if not FALLBACK:
dl_epg()
else:
logger.exception("EPG build, EPG download failed. Trying SSTV.")
dl_epg(2)
with open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'epg.xml'), 'r+') as f:
content = f.read()
response = Response(content, mimetype='text/xml')
headers = dict(response.headers)
headers.update(
{"Access-Control-Expose-Headers": "Accept-Ranges, Content-Encoding, Content-Length, Content-Range",
"Access-Control-Allow-Origin": "*", "Access-Control-Allow-Headers": "Range",
"Access-Control-Allow-Methods": "GET, POST, OPTIONS, HEAD"})
response.headers = headers
return response
# return sports only epg
if request_file.lower() == 'sports.xml':
logger.info("Sports EPG was requested by %s", request.environ.get('REMOTE_ADDR'))
if not FALLBACK:
dl_epg()
else:
logger.exception("Sports EPG build, EPG download failed. Trying SSTV.")
dl_epg(2)
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'sports.xml')
# return combined epg
if request_file.lower() == 'combined.xml':
logger.info("Combined EPG was requested by %s", request.environ.get('REMOTE_ADDR'))
if not FALLBACK:
dl_epg()
else:
logger.exception("Combined EPG build, EPG download failed. Trying SSTV.")
dl_epg(2)
obtain_epg()
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'combined.xml')
# return icons
elif request_file.lower().endswith('.png'):
logger.debug("Icon %s was requested by %s" % (request_file, request.environ.get('REMOTE_ADDR')))
try:
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), request_file)
except:
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'empty.png')
elif request_file.lower() == 'favicon.ico':
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'empty.png')
# return main menu
elif request_file.lower().startswith('index'):
logger.info("Index was requested by %s", request.environ.get('REMOTE_ADDR'))
create_menu()
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'index.html')
# return settings menu
elif request_file.lower().startswith('settings'):
logger.info("Settings was requested by %s", request.environ.get('REMOTE_ADDR'))
create_menu()
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'settings.html')
# return settings menu
elif request_file.lower().startswith('adv_settings'):
logger.info("Adv_Settings was requested by %s", request.environ.get('REMOTE_ADDR'))
create_menu()
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'adv_settings.html')
# return channels menu
elif request_file.lower().startswith('channels'):
logger.info("Channels was requested by %s", request.environ.get('REMOTE_ADDR'))
create_menu()
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'channels.html')
# return paths menu
elif request_file.lower().startswith('paths'):
logger.info("Channels was requested by %s", request.environ.get('REMOTE_ADDR'))
create_menu()
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'paths.html')
# return howto menu
elif request_file.lower().startswith('howto'):
logger.info("Howto was requested by %s", request.environ.get('REMOTE_ADDR'))
create_menu()
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'howto.html')
# kodi static refresh
elif request_file.lower().startswith('refresh'):
# kodi force rescan 423-434
logger.info("Refresh was requested by %s", request.environ.get('REMOTE_ADDR'))
load_token()
check_token()
rescan_channels()
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'empty.png')
# returns static playlist
elif request_file.lower().startswith('static'):
if request.args.get('strm'):
strmType = request.args.get('strm')
else:
strmType = STRM
staticplaylist = build_static_playlist(strmType)
logger.info("Static playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
return Response(staticplaylist, mimetype='application/x-mpegURL')
# returns test playlist
elif request_file.lower() == "test.m3u8":
testplaylist = build_test_playlist([SERVER_HOST, EXT_HOST])
logger.info("Test playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
return Response(testplaylist, mimetype='application/x-mpegURL')
elif request_file.lower() == "server.m3u8":
testplaylist = build_server_playlist()
logger.info("Server playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
return Response(testplaylist, mimetype='application/x-mpegURL')
# returns kodi playlist
elif request_file.lower().startswith('kodi'):
kodiplaylist = build_kodi_playlist()
logger.info("Kodi channels playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
return Response(kodiplaylist, mimetype='application/x-mpegURL')
# returns combined playlist
elif request_file.lower() == 'combined.m3u8':
extraplaylist = build_playlist(SERVER_HOST) + obtain_m3u8()
logger.info("Combined channels playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
logger.info("Sending playlist to %s", request.environ.get('REMOTE_ADDR'))
return Response(extraplaylist, mimetype='application/x-mpegURL')
# returns external playlist
elif request_file.lower().startswith('external'):
extplaylist = build_playlist(EXT_HOST)
logger.info("External channels playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
return Response(extplaylist, mimetype='application/x-mpegURL')
# returns tvh playlist
elif request_file.lower().startswith('tvh'):
tvhplaylist = build_tvh_playlist()
logger.info("TVH channels playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
return Response(tvhplaylist, mimetype='application/x-mpegURL')
# returns sports playlist
elif request_file.lower() == ('sports.m3u8'):
sportsplaylist = build_sports_playlist(SERVER_HOST)
logger.info("Sports channels playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
return Response(sportsplaylist, mimetype='application/x-mpegURL')
elif request_file.lower() == 'playlist.m3u8' or request_file.lower().startswith(
'ch') or request_file.lower() == 'mpeg.2ts':
# returning Dynamic channels
if request.args.get('ch') or request_file.lower().startswith('ch'):
if request_file.lower().startswith('ch'):
chan = request_file.lower().replace("ch", "").replace(".m3u8", "")
sanitized_channel = "{:02.0f}".format(int(chan))
else:
chan = request.args.get('ch')
sanitized_channel = ("0%d" % int(request.args.get('ch'))) if int(
request.args.get('ch')) < 10 else request.args.get('ch')
check_token()
logger.info("Channel %s playlist was requested by %s", sanitized_channel,
request.environ.get('REMOTE_ADDR'))
if SITE == 'vaders':
logger.info("Channel %s playlist was requested by %s", sanitized_channel,
request.environ.get('REMOTE_ADDR'))
vaders_url = "http://vapi.vaders.tv/play/{0}.{1}?"
tokenDict = {"username": "vsmystreams_" + USER, "password": PASS}
jsonToken = json.dumps(tokenDict)
tokens = base64.b64encode(jsonToken.encode('utf-8'))
strm = 'ts' if STRM == 'mpegts' else 'm3u8'
if request.args.get('strm'):
strm = request.args.get('strm')
tokens = urllib.parse.urlencode({"token": str(tokens)[1:]})
if int(chan) > 150:
channel = chan
else:
channel = vaders_channels[chan]
channel_url = vaders_url.format(channel, strm) + tokens
return redirect(channel_url, code=302)
qual = 1
if request.args.get('qual'): # and int(sanitized_channel) <= QUALLIMIT:
qual = request.args.get('qual')
elif int(sanitized_channel) <= QUALLIMIT:
qual = QUAL
if request.args.get('strm'):
strm = request.args.get('strm')
else:
strm = STRM
output_url = createURL(sanitized_channel, qual, strm, token)
# if strm == 'mpegts':
# return auto(sanitized_channel, qual)
# channel fixing for dead server/Quality
if CHECK_CHANNEL and strm == 'hls' and not checkChannelURL(output_url):
output_url = fixURL(strm, sanitized_channel, qual, token['hash'])
# creates the output playlist files and returns it as a variable as well
if strm == 'hls':
output_file = create_channel_file(output_url)
# useful for debugging
logger.debug("URL returned: %s" % output_url)
if request.args.get('type'):
returntype = int(request.args.get('type'))
else:
returntype = 3
# different return types as different clients require it. Expect this to change as clients fail on certain things like dynamic hls
if strm == 'rtmp' or request.args.get('response'):
response = redirect(output_url, code=302)
headers = dict(response.headers)
headers.update({'Content-Type': 'application/x-mpegURL', "Access-Control-Allow-Origin": "*"})
response.headers = headers
logger.debug("returning response")
return response
elif returntype == 1 or client == 'kodi':
# hlsTemplate = 'https://{0}.smoothstreams.tv:443/{1}/ch{2}q{3}.stream/playlist.m3u8?wmsAuthSign={4}'
# ss_url = hlsTemplate.format(SRVR, SITE, sanitized_channel, qual, token['hash'])
# some players are having issues with http/https redirects
logger.debug("returning hls url redirect")
return redirect(output_url, code=302)
elif returntype == 2 or client == 'vlc':
logger.debug("returning m3u8 as file")
return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'playlist.m3u8')
elif returntype == 4:
logger.debug("returning hls url")
return output_url
else:
# some players are having issues with http/https redirects
try:
logger.debug("returning m3u8 as variable")
return output_file
except:
logger.debug("returning hls url")
return output_url
# returning dynamic playlist
else:
if request.args.get('strm'):
strmType = request.args.get('strm')
else:
strmType = STRM
playlist = build_playlist(SERVER_HOST, strmType)
logger.info(
"All channels playlist(%s) was requested by %s" % (strmType, request.environ.get('REMOTE_ADDR')))
return Response(playlist, mimetype='application/x-mpegURL')
elif '.m3u8' in request_file.lower():
strng = request_file.lower().replace('.m3u8', '')
if strng in streamtype:
strmType = strng
else:
strmType = STRM
playlist = build_playlist(SERVER_HOST, strmType)
logger.info("All channels playlist(%s) was requested by %s" % (strmType, request.environ.get('REMOTE_ADDR')))
return Response(playlist, mimetype='application/x-mpegURL')
# HDHomeRun emulated json files for Plex Live tv.
elif request_file.lower() == 'lineup_status.json':
return status()
elif request_file.lower() == 'discover.json':
return discover()
elif request_file.lower() == 'lineup.json':
if TVHREDIRECT == True:
return tvh_lineup()
else:
return createLineup(chan_map)
elif request_file.lower() == 'lineup.post':
return lineup_post()
elif request_file.lower() == 'device.xml':
return device()
else:
logger.info("Unknown requested %r by %s", request_file, request.environ.get('REMOTE_ADDR'))
abort(404, "Unknown request")
@app.route('/tvh/<request_file>')
def tvh_returns(request_file):
if request_file.lower() == 'lineup_status.json':
return status()
elif request_file.lower() == 'discover.json':
return tvh_discover()
elif request_file.lower() == 'lineup.json':
return tvh_lineup()
elif request_file.lower() == 'lineup.post':
return lineup_post()
elif request_file.lower() == 'device.xml':
return tvh_device()
else:
logger.info("Unknown requested %r by %s", request_file, request.environ.get('REMOTE_ADDR'))
abort(404, "Unknown request")
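# auto(): stream endpoint used by Plex Live TV and TVHeadend. It resolves the channel
# number and quality, builds an MPEG-TS URL, and either 302-redirects to it or pipes it
# through ffmpeg when PIPE is enabled. With the 'tvh' command-line switch every request
# is answered with channel 01 so TVH can map all services on its first scan.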
@app.route('/%s/auto/<request_file>' % SERVER_PATH)
# returns a piped stream, used for TVH/Plex Live TV
def auto(request_file, qual=""):
logger.debug("starting auto function")
if request.args.get('transcode') and request.args.get('transcode') != 'none':
desired = request.args.get('transcode')
if desired == 'heavy' or desired == 'mobile':
qual = '1'
elif desired == 'internet540' or desired == 'internet480':
qual = '2'
elif desired == 'internet360' or desired == 'internet240':
qual = '3'
if request.args.get('url'):
logger.info("Piping custom URL")
url = request.args.get('url')
if '|' in url:
url = url.split('|')[0]
logger.debug(url)
return Response(response=ffmpegPipe(url), status=200, mimetype='video/mp2t',
headers={'Access-Control-Allow-Origin': '*', "Content-Type": "video/mp2t",
"Content-Disposition": "inline", "Content-Transfer-Enconding": "binary"})
else:
check_token()
channel = request_file.replace("v", "")
logger.info("Channel %s playlist was requested by %s", channel,
request.environ.get('REMOTE_ADDR'))
sanitized_channel = ("0%d" % int(channel)) if int(channel) < 10 else channel
# find the quality to use for the url
sanitized_qual = '1'
if qual == "" and checkChannelURL(createURL(sanitized_channel, QUAL, 'hls', token)):
sanitized_qual = QUAL
elif qual != "" and checkChannelURL(createURL(sanitized_channel, qual, 'hls', token)):
sanitized_qual = qual
url = createURL(sanitized_channel, sanitized_qual, 'mpegts', token)
logger.debug(
"sanitized_channel: %s sanitized_qual: %s QUAL: %s qual: %s" % (
sanitized_channel, sanitized_qual, QUAL, qual))
# changing to mpegts
# if CHECK_CHANNEL and not checkChannelURL(url):
# url = fixURL('hls', sanitized_channel, qual, token['hash'])
logger.debug(url)
# try:
# urllib.request.urlopen(url, timeout=2).getcode()
# except:
# a = 1
# except timeout:
# #special arg for tricking tvh into saving every channel first time
# sanitized_channel = '01'
# sanitized_qual = '3'
# url = template.format(SRVR, SITE, sanitized_channel,sanitized_qual, token['hash'])
if args.tvh:
logger.debug("TVH Trickery happening")
sanitized_channel = '01'
sanitized_qual = '3'
url = createURL(sanitized_channel, sanitized_qual, 'mpegts', token)
logger.debug(url)
response = redirect(url, code=302)
headers = dict(response.headers)
headers.update({'Content-Type': 'video/mp2t', "Access-Control-Allow-Origin": "*"})
response.headers = headers
logger.debug("returning response")
if PIPE:
url = createURL(sanitized_channel, sanitized_qual, 'hls', token)
logger.debug("Piping")
return Response(response=ffmpegPipe(url), status=200, mimetype='video/mp2t',
headers={'Access-Control-Allow-Origin': '*', "Content-Type": "video/mp2t",
"Content-Disposition": "inline", "Content-Transfer-Enconding": "binary"})
return response
# return redirect(url, code=302)
############################################################
# MAIN
############################################################
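# main(): load settings and token, build the channel map (falling back to the SSTV
# source if the fog API is unreachable), pre-build the playlists, start the background
# threads (icon download, playlist refresh, updater) and run the Flask app.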
def main():
logger.info("Initializing")
load_settings()
if os.path.exists(TOKEN_PATH):
load_token()
check_token()
logger.info("Building initial playlist...")
try:
global chan_map, FALLBACK, CHANAPI, jsonGuide1, jsonGuide2, playlist, kodiplaylist, tvhplaylist, sportsPlaylist
# fetch chan_map
try:
chan_map = build_channel_map()
except:
# cannot get response from fog, resorting to fallback
FALLBACK = True
chan_map = build_channel_map_sstv()
try:
chanAPIURL = 'https://guide.smoothstreams.tv/api/api-qualities-new.php'
CHANAPI = json.loads(urllib.request.urlopen(chanAPIURL).read().decode("utf-8"))
except:
CHANAPI = None
jsonGuide1 = getJSON("iptv.json", "https://iptvguide.netlify.com/iptv.json",
"https://fast-guide.smoothstreams.tv/altepg/feed1.json")
jsonGuide2 = getJSON("tv.json", "https://iptvguide.netlify.com/tv.json",
"https://fast-guide.smoothstreams.tv/altepg/feedall1.json")
playlist = build_playlist(SERVER_HOST)
sportsPlaylist = build_sports_playlist(SERVER_HOST)
kodiplaylist = build_kodi_playlist()
tvhplaylist = build_tvh_playlist()
# Download icons, runs in sep thread, takes ~1min
try:
di = threading.Thread(target=dl_icons, args=(len(chan_map),))
di.setDaemon(True)
di.start()
except (KeyboardInterrupt, SystemExit):
sys.exit()
dl_epg()
except:
logger.exception("Exception while building initial playlist: ")
exit(1)
try:
thread.start_new_thread(thread_playlist, ())
except:
_thread.start_new_thread(thread_playlist, ())
if AUTO_SERVER: testServers()
print("\n\n##############################################################")
print("Main Menu - %s/index.html" % urljoin(SERVER_HOST, SERVER_PATH))
print("Contains all the information located here and more!")
print("##############################################################\n\n")
print("\n##############################################################")
print("m3u8 url is %s/playlist.m3u8" % urljoin(SERVER_HOST, SERVER_PATH))
print("kodi m3u8 url is %s/kodi.m3u8" % urljoin(SERVER_HOST, SERVER_PATH))
print("EPG url is %s/epg.xml" % urljoin(SERVER_HOST, SERVER_PATH))
print("Sports EPG url is %s/sports.xml" % urljoin(SERVER_HOST, SERVER_PATH))
print("Plex Live TV url is %s" % urljoin(SERVER_HOST, SERVER_PATH))
print("TVHeadend network url is %s/tvh.m3u8" % urljoin(SERVER_HOST, SERVER_PATH))
print("External m3u8 url is %s/external.m3u8" % urljoin(EXT_HOST, SERVER_PATH))
print("Combined m3u8 url is %s/combined.m3u8" % urljoin(SERVER_HOST, SERVER_PATH))
print("Combined EPG url is %s/combined.xml" % urljoin(SERVER_HOST, SERVER_PATH))
print("Static m3u8 url is %s/static.m3u8" % urljoin(SERVER_HOST, SERVER_PATH))
print("TVH's own EPG url is http://%s:9981/xmltv/channels" % TVHURL)
print("Static XSPF url is %s/static.xspf" % urljoin(SERVER_HOST, SERVER_PATH))
print("Dynamic XSPF url is %s/playlist.xspf" % urljoin(SERVER_HOST, SERVER_PATH))
print("##############################################################\n")
if __version__ < latest_ver:
logger.info(
"Your version (%s%s) is out of date, the latest is %s, which has now be downloaded for you into the 'updates' subdirectory." % (
type, __version__, latest_ver))
newfilename = ntpath.basename(latestfile)
if not os.path.isdir(os.path.join(os.path.dirname(sys.argv[0]), 'updates')):
os.mkdir(os.path.join(os.path.dirname(sys.argv[0]), 'updates'))
urllib.request.urlretrieve(latestfile, os.path.join(os.path.dirname(sys.argv[0]), 'updates', newfilename))
else:
logger.info("Your version (%s) is up to date." % (__version__))
logger.info("Listening on %s:%d", LISTEN_IP, LISTEN_PORT)
try:
a = threading.Thread(target=thread_updater)
a.setDaemon(True)
a.start()
except (KeyboardInterrupt, SystemExit):
sys.exit()
launch_browser()
# debug causes it to load twice on initial startup and every time the script is saved, TODO disable later
try:
app.run(host=LISTEN_IP, port=LISTEN_PORT, threaded=True, debug=False)
except:
os.system('cls' if os.name == 'nt' else 'clear')
logger.exception("Proxy failed to launch, try another port")
logger.info("Finished!")
if __name__ == "__main__":
main()
|
systrace_controller.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import threading
import zlib
from profile_chrome import controllers
from profile_chrome import util
from pylib.constants import host_paths
with host_paths.SysPath(host_paths.DEVIL_PATH):
from devil.utils import cmd_helper
_SYSTRACE_OPTIONS = [
# Compress the trace before sending it over USB.
'-z',
# Use a large trace buffer to increase the polling interval.
'-b', '16384'
]
# Interval in seconds for sampling systrace data.
_SYSTRACE_INTERVAL = 15
_TRACING_ON_PATH = '/sys/kernel/debug/tracing/tracing_on'
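# SystraceController drives 'atrace' asynchronously on the device: StartTracing launches
# a polling thread, _CollectData dumps the (zlib-compressed) buffer every
# _SYSTRACE_INTERVAL seconds (or only at the end when ring-buffer mode is on), and
# PullTrace joins the thread and writes the concatenated trace to disk.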
class SystraceController(controllers.BaseController):
def __init__(self, device, categories, ring_buffer):
controllers.BaseController.__init__(self)
self._device = device
self._categories = categories
self._ring_buffer = ring_buffer
self._done = threading.Event()
self._thread = None
self._trace_data = None
def __repr__(self):
return 'systrace'
@staticmethod
def GetCategories(device):
return device.RunShellCommand('atrace --list_categories')
def StartTracing(self, _):
self._thread = threading.Thread(target=self._CollectData)
self._thread.start()
def StopTracing(self):
self._done.set()
def PullTrace(self):
self._thread.join()
self._thread = None
if self._trace_data:
output_name = 'systrace-%s' % util.GetTraceTimestamp()
with open(output_name, 'w') as out:
out.write(self._trace_data)
return output_name
def IsTracingOn(self):
result = self._RunAdbShellCommand(['cat', _TRACING_ON_PATH])
return result.strip() == '1'
def _RunAdbShellCommand(self, command):
# We use a separate interface to adb because the one from AndroidCommands
# isn't re-entrant.
# TODO(jbudorick) Look at providing a way to unhandroll this once the
# adb rewrite has fully landed.
device_param = (['-s', str(self._device)] if str(self._device) else [])
cmd = ['adb'] + device_param + ['shell'] + command
return cmd_helper.GetCmdOutput(cmd)
def _RunATraceCommand(self, command):
cmd = ['atrace', '--%s' % command] + _SYSTRACE_OPTIONS + self._categories
return self._RunAdbShellCommand(cmd)
def _ForceStopAtrace(self):
# atrace on pre-M Android devices cannot be stopped asynchronously
# correctly. Use synchronous mode to force stop.
cmd = ['atrace', '-t', '0']
return self._RunAdbShellCommand(cmd)
def _CollectData(self):
trace_data = []
self._RunATraceCommand('async_start')
try:
while not self._done.is_set():
self._done.wait(_SYSTRACE_INTERVAL)
if not self._ring_buffer or self._done.is_set():
trace_data.append(
self._DecodeTraceData(self._RunATraceCommand('async_dump')))
finally:
trace_data.append(
self._DecodeTraceData(self._RunATraceCommand('async_stop')))
if self.IsTracingOn():
self._ForceStopAtrace()
self._trace_data = ''.join([zlib.decompress(d) for d in trace_data])
@staticmethod
def _DecodeTraceData(trace_data):
try:
trace_start = trace_data.index('TRACE:')
except ValueError:
raise RuntimeError('Systrace start marker not found')
trace_data = trace_data[trace_start + 6:]
# Collapse CRLFs that are added by adb shell.
if trace_data.startswith('\r\n'):
trace_data = trace_data.replace('\r\n', '\n')
# Skip the initial newline.
return trace_data[1:]
|
agent.py
|
#!/usr/bin/env python
#
# AzureMonitoringLinuxAgent Extension
#
# Copyright 2019 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
# future imports have no effect on python 3 (verified in official docs)
# importing them from the 'future' package raises import errors on python 3, so skip the import there
if sys.version_info[0] < 3:
from future import standard_library
standard_library.install_aliases()
from builtins import str
import os
import os.path
import datetime
import signal
import pwd
import grp
import re
import filecmp
import stat
import traceback
import time
import platform
import subprocess
import json
import base64
import inspect
import urllib.request, urllib.parse, urllib.error
import shutil
import crypt
import xml.dom.minidom
import hashlib
from distutils.version import LooseVersion
from hashlib import sha256
from shutil import copyfile
from threading import Thread
import telegraf_utils.telegraf_config_handler as telhandler
import metrics_ext_utils.metrics_constants as metrics_constants
import metrics_ext_utils.metrics_ext_handler as me_handler
import metrics_ext_utils.metrics_common_utils as metrics_utils
try:
from Utils.WAAgentUtil import waagent
import Utils.HandlerUtil as HUtil
except Exception as e:
# These utils have checks around the use of them; this is not an exit case
print('Importing utils failed with error: {0}'.format(e))
# This code is taken from the omsagent's extension wrapper.
# This same monkey patch fix is relevant for AMA extension as well.
# This monkey patch duplicates the one made in the waagent import above.
# It is necessary because on 2.6, the waagent monkey patch appears to be overridden
# by the python-future subprocess.check_output backport.
if sys.version_info < (2,7):
def check_output(*popenargs, **kwargs):
r"""Backport from subprocess module from python 2.7"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
# Exception classes used by this module.
class CalledProcessError(Exception):
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
subprocess.check_output = check_output
subprocess.CalledProcessError = CalledProcessError
# Global Variables
PackagesDirectory = 'packages'
# TO BE CHANGED WITH EACH NEW RELEASE IF THE BUNDLE VERSION CHANGES
# TODO: Installer should automatically figure this out from the folder instead of requiring this update
BundleFileNameDeb = 'azure-mdsd_1.5.133-build.master.157_x86_64.deb'
BundleFileNameRpm = 'azure-mdsd_1.5.133-build.master.157_x86_64.rpm'
BundleFileName = ''
TelegrafBinName = 'telegraf'
InitialRetrySleepSeconds = 30
PackageManager = ''
PackageManagerOptions = ''
MdsdCounterJsonPath = '/etc/mdsd.d/config-cache/metricCounters.json'
# Commands
OneAgentInstallCommand = ''
OneAgentUninstallCommand = ''
RestartOneAgentServiceCommand = ''
DisableOneAgentServiceCommand = ''
# Error codes
DPKGLockedErrorCode = 56
MissingorInvalidParameterErrorCode = 53
UnsupportedOperatingSystem = 51
IndeterminateOperatingSystem = 51
# Configuration
HUtilObject = None
SettingsSequenceNumber = None
HandlerEnvironment = None
SettingsDict = None
# Change permission of log path - if we fail, that is not an exit case
try:
ext_log_path = '/var/log/azure/'
if os.path.exists(ext_log_path):
        os.chmod(ext_log_path, 0o700)  # octal mode; plain decimal 700 would not give the intended owner-only permissions
except:
pass
def main():
"""
Main method
Parse out operation from argument, invoke the operation, and finish.
"""
init_waagent_logger()
waagent_log_info('Azure Monitoring Agent for Linux started to handle.')
# Determine the operation being executed
operation = None
try:
option = sys.argv[1]
if re.match('^([-/]*)(disable)', option):
operation = 'Disable'
elif re.match('^([-/]*)(uninstall)', option):
operation = 'Uninstall'
elif re.match('^([-/]*)(install)', option):
operation = 'Install'
elif re.match('^([-/]*)(enable)', option):
operation = 'Enable'
elif re.match('^([-/]*)(update)', option):
operation = 'Update'
elif re.match('^([-/]*)(metrics)', option):
operation = 'Metrics'
elif re.match('^([-/]*)(arc)', option):
operation = 'Arc'
except Exception as e:
waagent_log_error(str(e))
if operation is None:
log_and_exit('Unknown', 1, 'No valid operation provided')
# Set up for exit code and any error messages
exit_code = 0
message = '{0} succeeded'.format(operation)
exit_code = check_disk_space_availability()
if exit_code != 0:
message = '{0} failed due to low disk space'.format(operation)
log_and_exit(operation, exit_code, message)
# Invoke operation
try:
global HUtilObject
HUtilObject = parse_context(operation)
exit_code, output = operations[operation]()
# Exit code 1 indicates a general problem that doesn't have a more
# specific error code; it often indicates a missing dependency
if exit_code == 1 and operation == 'Install':
message = 'Install failed with exit code 1. Please check that ' \
'dependencies are installed. For details, check logs ' \
'in /var/log/azure/Microsoft.Azure.Monitor' \
'.AzureMonitorLinuxAgent'
elif exit_code is DPKGLockedErrorCode and operation == 'Install':
message = 'Install failed with exit code {0} because the ' \
'package manager on the VM is currently locked: ' \
'please wait and try again'.format(DPKGLockedErrorCode)
elif exit_code != 0:
message = '{0} failed with exit code {1} {2}'.format(operation,
exit_code, output)
except AzureMonitorAgentForLinuxException as e:
exit_code = e.error_code
message = e.get_error_message(operation)
except Exception as e:
exit_code = 1
message = '{0} failed with error: {1}\n' \
'Stacktrace: {2}'.format(operation, e,
traceback.format_exc())
# Finish up and log messages
log_and_exit(operation, exit_code, message)
def check_disk_space_availability():
"""
Check if there is the required space on the machine.
"""
try:
if get_free_space_mb("/var") < 500 or get_free_space_mb("/etc") < 500 :
# 52 is the exit code for missing dependency i.e. disk space
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
return 52
else:
return 0
except:
print('Failed to check disk usage.')
return 0
def get_free_space_mb(dirname):
"""
Get the free space in MB in the directory path.
"""
st = os.statvfs(dirname)
return (st.f_bavail * st.f_frsize) // (1024 * 1024)
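# Worked example (illustrative): with f_bavail = 1,000,000 free blocks and
# f_frsize = 4096 bytes, the function returns (1000000 * 4096) // (1024 * 1024)
# = 3906 MB.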
def is_systemd():
"""
Check if the system is using systemd
"""
check_systemd = os.system("pidof systemd 1>/dev/null 2>&1")
return check_systemd == 0
def install():
"""
Ensure that this VM distro and version are supported.
Install the Azure Monitor Linux Agent package, using retries.
Note: install operation times out from WAAgent at 15 minutes, so do not
wait longer.
"""
find_package_manager("Install")
exit_if_vm_not_supported('Install')
public_settings, protected_settings = get_settings()
package_directory = os.path.join(os.getcwd(), PackagesDirectory)
bundle_path = os.path.join(package_directory, BundleFileName)
    os.chmod(bundle_path, 0o755)  # assumed intent of the original decimal 100: make the bundle readable/executable for the package manager
print(PackageManager, " and ", BundleFileName)
OneAgentInstallCommand = "{0} {1} -i {2}".format(PackageManager, PackageManagerOptions, bundle_path)
hutil_log_info('Running command "{0}"'.format(OneAgentInstallCommand))
# Retry, since install can fail due to concurrent package operations
exit_code, output = run_command_with_retries_output(OneAgentInstallCommand, retries = 15,
retry_check = retry_if_dpkg_locked,
final_check = final_check_if_dpkg_locked)
default_configs = {
"MDSD_LOG" : "/var/log",
"MDSD_ROLE_PREFIX" : "/var/run/mdsd/default",
"MDSD_SPOOL_DIRECTORY" : "/var/opt/microsoft/linuxmonagent",
"MDSD_OPTIONS" : "\"-A -c /etc/mdsd.d/mdsd.xml -d -r $MDSD_ROLE_PREFIX -S $MDSD_SPOOL_DIRECTORY/eh -e $MDSD_LOG/mdsd.err -w $MDSD_LOG/mdsd.warn -o $MDSD_LOG/mdsd.info\"",
"MCS_ENDPOINT" : "handler.control.monitor.azure.com",
"AZURE_ENDPOINT" : "https://monitor.azure.com/",
"ADD_REGION_TO_MCS_ENDPOINT" : "true",
"ENABLE_MCS" : "false",
"MONITORING_USE_GENEVA_CONFIG_SERVICE" : "false",
"MDSD_USE_LOCAL_PERSISTENCY" : "true",
#"OMS_TLD" : "int2.microsoftatlanta-int.com",
#"customResourceId" : "/subscriptions/42e7aed6-f510-46a2-8597-a5fe2e15478b/resourcegroups/amcs-test/providers/Microsoft.OperationalInsights/workspaces/amcs-pretend-linuxVM",
}
# Decide the mode
if public_settings is not None and public_settings.get("GCS_AUTO_CONFIG") == "true":
hutil_log_info("Detecting Auto-Config mode.")
return 0, ""
elif protected_settings is None or len(protected_settings) == 0:
default_configs["ENABLE_MCS"] = "true"
else:
# look for LA protected settings
for var in list(protected_settings.keys()):
if "_key" in var or "_id" in var:
default_configs[var] = protected_settings.get(var)
# check if required GCS params are available
MONITORING_GCS_CERT_CERTFILE = None
if "certificate" in protected_settings:
MONITORING_GCS_CERT_CERTFILE = base64.standard_b64decode(protected_settings.get("certificate"))
MONITORING_GCS_CERT_KEYFILE = None
if "certificateKey" in protected_settings:
MONITORING_GCS_CERT_KEYFILE = base64.standard_b64decode(protected_settings.get("certificateKey"))
MONITORING_GCS_ENVIRONMENT = ""
if "monitoringGCSEnvironment" in protected_settings:
MONITORING_GCS_ENVIRONMENT = protected_settings.get("monitoringGCSEnvironment")
MONITORING_GCS_NAMESPACE = ""
if "namespace" in protected_settings:
MONITORING_GCS_NAMESPACE = protected_settings.get("namespace")
MONITORING_GCS_ACCOUNT = ""
if "monitoringGCSAccount" in protected_settings:
MONITORING_GCS_ACCOUNT = protected_settings.get("monitoringGCSAccount")
MONITORING_GCS_REGION = ""
if "monitoringGCSRegion" in protected_settings:
MONITORING_GCS_REGION = protected_settings.get("monitoringGCSRegion")
MONITORING_CONFIG_VERSION = ""
if "configVersion" in protected_settings:
MONITORING_CONFIG_VERSION = protected_settings.get("configVersion")
MONITORING_GCS_AUTH_ID_TYPE = ""
if "monitoringGCSAuthIdType" in protected_settings:
MONITORING_GCS_AUTH_ID_TYPE = protected_settings.get("monitoringGCSAuthIdType")
MONITORING_GCS_AUTH_ID = ""
if "monitoringGCSAuthId" in protected_settings:
MONITORING_GCS_AUTH_ID = protected_settings.get("monitoringGCSAuthId")
if ((MONITORING_GCS_CERT_CERTFILE is None or MONITORING_GCS_CERT_KEYFILE is None) and (MONITORING_GCS_AUTH_ID_TYPE == "")) or MONITORING_GCS_ENVIRONMENT == "" or MONITORING_GCS_NAMESPACE == "" or MONITORING_GCS_ACCOUNT == "" or MONITORING_GCS_REGION == "" or MONITORING_CONFIG_VERSION == "":
waagent_log_error('Not all required GCS parameters are provided')
raise ParameterMissingException
else:
# set the values for GCS
default_configs["MONITORING_USE_GENEVA_CONFIG_SERVICE"] = "true"
default_configs["MONITORING_GCS_ENVIRONMENT"] = MONITORING_GCS_ENVIRONMENT
default_configs["MONITORING_GCS_NAMESPACE"] = MONITORING_GCS_NAMESPACE
default_configs["MONITORING_GCS_ACCOUNT"] = MONITORING_GCS_ACCOUNT
default_configs["MONITORING_GCS_REGION"] = MONITORING_GCS_REGION
default_configs["MONITORING_CONFIG_VERSION"] = MONITORING_CONFIG_VERSION
# write the certificate and key to disk
uid = pwd.getpwnam("syslog").pw_uid
gid = grp.getgrnam("syslog").gr_gid
if MONITORING_GCS_AUTH_ID_TYPE != "":
default_configs["MONITORING_GCS_AUTH_ID_TYPE"] = MONITORING_GCS_AUTH_ID_TYPE
if MONITORING_GCS_AUTH_ID != "":
default_configs["MONITORING_GCS_AUTH_ID"] = MONITORING_GCS_AUTH_ID
if MONITORING_GCS_CERT_CERTFILE is not None:
default_configs["MONITORING_GCS_CERT_CERTFILE"] = "/etc/mdsd.d/gcscert.pem"
fh = open("/etc/mdsd.d/gcscert.pem", "wb")
fh.write(MONITORING_GCS_CERT_CERTFILE)
fh.close()
os.chown("/etc/mdsd.d/gcscert.pem", uid, gid)
os.system('chmod {1} {0}'.format("/etc/mdsd.d/gcscert.pem", 400))
if MONITORING_GCS_CERT_KEYFILE is not None:
default_configs["MONITORING_GCS_CERT_KEYFILE"] = "/etc/mdsd.d/gcskey.pem"
fh = open("/etc/mdsd.d/gcskey.pem", "wb")
fh.write(MONITORING_GCS_CERT_KEYFILE)
fh.close()
os.chown("/etc/mdsd.d/gcskey.pem", uid, gid)
os.system('chmod {1} {0}'.format("/etc/mdsd.d/gcskey.pem", 400))
config_file = "/etc/default/mdsd"
config_updated = False
try:
if os.path.isfile(config_file):
data = []
new_data = ""
vars_set = set()
with open(config_file, "r") as f:
data = f.readlines()
for line in data:
for var in list(default_configs.keys()):
if var in line:
line = "export " + var + "=" + default_configs[var] + "\n"
vars_set.add(var)
break
new_data += line
for var in list(default_configs.keys()):
if var not in vars_set:
new_data += "export " + var + "=" + default_configs[var] + "\n"
with open("/etc/default/mdsd_temp", "w") as f:
f.write(new_data)
config_updated = True if len(new_data) > 0 else False
if not config_updated or not os.path.isfile("/etc/default/mdsd_temp"):
log_and_exit("install",MissingorInvalidParameterErrorCode, "Error while updating MCS Environment Variables in /etc/default/mdsd")
os.remove(config_file)
os.rename("/etc/default/mdsd_temp", config_file)
uid = pwd.getpwnam("syslog").pw_uid
gid = grp.getgrnam("syslog").gr_gid
os.chown(config_file, uid, gid)
os.system('chmod {1} {0}'.format(config_file, 400))
else:
log_and_exit("install", MissingorInvalidParameterErrorCode, "Could not find the file - /etc/default/mdsd" )
except:
log_and_exit("install", MissingorInvalidParameterErrorCode, "Failed to add MCS Environment Variables in /etc/default/mdsd" )
return exit_code, output
def check_kill_process(pstring):
for line in os.popen("ps ax | grep " + pstring + " | grep -v grep"):
fields = line.split()
pid = fields[0]
os.kill(int(pid), signal.SIGKILL)
def uninstall():
"""
Uninstall the Azure Monitor Linux Agent.
This is a somewhat soft uninstall. It is not a purge.
Note: uninstall operation times out from WAAgent at 5 minutes
"""
find_package_manager("Uninstall")
if PackageManager == "dpkg":
OneAgentUninstallCommand = "dpkg -P azure-mdsd"
elif PackageManager == "rpm":
OneAgentUninstallCommand = "rpm -e azure-mdsd"
else:
        log_and_exit("Uninstall", UnsupportedOperatingSystem, "The OS has neither rpm nor dpkg")
hutil_log_info('Running command "{0}"'.format(OneAgentUninstallCommand))
# Retry, since uninstall can fail due to concurrent package operations
try:
exit_code, output = run_command_with_retries_output(OneAgentUninstallCommand, retries = 4,
retry_check = retry_if_dpkg_locked,
final_check = final_check_if_dpkg_locked)
except Exception as ex:
exit_code = 1
output = 'Uninstall failed with error: {0}\n' \
'Stacktrace: {1}'.format(ex, traceback.format_exc())
return exit_code, output
def enable():
"""
Start the Azure Monitor Linux Agent Service
This call will return non-zero or throw an exception if
the settings provided are incomplete or incorrect.
Note: enable operation times out from WAAgent at 5 minutes
"""
exit_if_vm_not_supported('Enable')
# Check if this is Arc VM and enable arc daemon if it is
if metrics_utils.is_arc_installed():
hutil_log_info("This VM is an Arc VM, Running the arc watcher daemon.")
start_arc_process()
if is_systemd():
OneAgentEnableCommand = "systemctl start mdsd"
else:
hutil_log_info("The VM doesn't have systemctl. Using the init.d service to start mdsd.")
OneAgentEnableCommand = "/etc/init.d/mdsd start"
public_settings, protected_settings = get_settings()
if public_settings is not None and public_settings.get("GCS_AUTO_CONFIG") == "true":
OneAgentEnableCommand = "systemctl start mdsdmgr"
if not is_systemd():
hutil_log_info("The VM doesn't have systemctl. Using the init.d service to start mdsdmgr.")
OneAgentEnableCommand = "/etc/init.d/mdsdmgr start"
hutil_log_info('Handler initiating onboarding.')
exit_code, output = run_command_and_log(OneAgentEnableCommand)
if exit_code == 0:
#start metrics process if enable is successful
start_metrics_process()
return exit_code, output
def disable():
"""
Disable Azure Monitor Linux Agent process on the VM.
Note: disable operation times out from WAAgent at 15 minutes
"""
# disable arc daemon if it is running
stop_arc_watcher()
#stop the metrics process
stop_metrics_process()
#stop the Azure Monitor Linux Agent service
if is_systemd():
DisableOneAgentServiceCommand = "systemctl stop mdsd"
else:
DisableOneAgentServiceCommand = "/etc/init.d/mdsd stop"
hutil_log_info("The VM doesn't have systemctl. Using the init.d service to stop mdsd.")
exit_code, output = run_command_and_log(DisableOneAgentServiceCommand)
return exit_code, output
def update():
"""
Update the current installation of AzureMonitorLinuxAgent
No logic to install the agent as agent -> install() will be called
    with update because upgradeMode = "UpgradeWithInstall" is set in HandlerManifest
"""
return 0, ""
def stop_metrics_process():
if telhandler.is_running(is_lad=False):
#Stop the telegraf and ME services
tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)
if tel_out:
hutil_log_info(tel_msg)
else:
hutil_log_error(tel_msg)
#Delete the telegraf and ME services
tel_rm_out, tel_rm_msg = telhandler.remove_telegraf_service()
if tel_rm_out:
hutil_log_info(tel_rm_msg)
else:
hutil_log_error(tel_rm_msg)
if me_handler.is_running(is_lad=False):
me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)
if me_out:
hutil_log_info(me_msg)
else:
hutil_log_error(me_msg)
me_rm_out, me_rm_msg = me_handler.remove_metrics_service(is_lad=False)
if me_rm_out:
hutil_log_info(me_rm_msg)
else:
hutil_log_error(me_rm_msg)
pids_filepath = os.path.join(os.getcwd(),'amametrics.pid')
# kill existing metrics watcher
if os.path.exists(pids_filepath):
with open(pids_filepath, "r") as f:
for pids in f.readlines():
kill_cmd = "kill " + pids
run_command_and_log(kill_cmd)
run_command_and_log("rm "+pids_filepath)
def start_metrics_process():
"""
Start metrics process that performs periodic monitoring activities
:return: None
"""
stop_metrics_process()
#start metrics watcher
oneagent_filepath = os.path.join(os.getcwd(),'agent.py')
args = ['python{0}'.format(sys.version_info[0]), oneagent_filepath, '-metrics']
log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')
hutil_log_info('start watcher process '+str(args))
subprocess.Popen(args, stdout=log, stderr=log)
def metrics_watcher(hutil_error, hutil_log):
"""
Watcher thread to monitor metric configuration changes and to take action on them
"""
# check every 30 seconds
sleepTime = 30
# sleep before starting the monitoring.
time.sleep(sleepTime)
last_crc = None
me_msi_token_expiry_epoch = None
while True:
try:
if os.path.isfile(MdsdCounterJsonPath):
f = open(MdsdCounterJsonPath, "r")
data = f.read()
if (data != ''):
json_data = json.loads(data)
if len(json_data) == 0:
last_crc = hashlib.sha256(data.encode('utf-8')).hexdigest()
if telhandler.is_running(is_lad=False):
#Stop the telegraf and ME services
tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)
if tel_out:
hutil_log(tel_msg)
else:
hutil_error(tel_msg)
#Delete the telegraf and ME services
tel_rm_out, tel_rm_msg = telhandler.remove_telegraf_service()
if tel_rm_out:
hutil_log(tel_rm_msg)
else:
hutil_error(tel_rm_msg)
if me_handler.is_running(is_lad=False):
me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)
if me_out:
hutil_log(me_msg)
else:
hutil_error(me_msg)
me_rm_out, me_rm_msg = me_handler.remove_metrics_service(is_lad=False)
if me_rm_out:
hutil_log(me_rm_msg)
else:
hutil_error(me_rm_msg)
else:
crc = hashlib.sha256(data.encode('utf-8')).hexdigest()
if(crc != last_crc):
# Resetting the me_msi_token_expiry_epoch variable if we set up ME again.
me_msi_token_expiry_epoch = None
hutil_log("Start processing metric configuration")
hutil_log(data)
telegraf_config, telegraf_namespaces = telhandler.handle_config(
json_data,
"udp://127.0.0.1:" + metrics_constants.ama_metrics_extension_udp_port,
"unix:///var/run/mdsd/default_influx.socket",
is_lad=False)
me_handler.setup_me(is_lad=False)
start_telegraf_out, log_messages = telhandler.start_telegraf(is_lad=False)
if start_telegraf_out:
hutil_log("Successfully started metrics-sourcer.")
else:
hutil_error(log_messages)
start_metrics_out, log_messages = me_handler.start_metrics(is_lad=False)
if start_metrics_out:
hutil_log("Successfully started metrics-extension.")
else:
hutil_error(log_messages)
last_crc = crc
generate_token = False
me_token_path = os.path.join(os.getcwd(), "/config/metrics_configs/AuthToken-MSI.json")
if me_msi_token_expiry_epoch is None or me_msi_token_expiry_epoch == "":
if os.path.isfile(me_token_path):
with open(me_token_path, "r") as f:
                                    authtoken_content = json.loads(f.read())  # the token file is JSON; parse it so "expires_on" can be read below
if authtoken_content and "expires_on" in authtoken_content:
me_msi_token_expiry_epoch = authtoken_content["expires_on"]
else:
generate_token = True
else:
generate_token = True
if me_msi_token_expiry_epoch:
currentTime = datetime.datetime.now()
token_expiry_time = datetime.datetime.fromtimestamp(int(me_msi_token_expiry_epoch))
if token_expiry_time - currentTime < datetime.timedelta(minutes=30):
# The MSI Token will expire within 30 minutes. We need to refresh the token
generate_token = True
if generate_token:
generate_token = False
msi_token_generated, me_msi_token_expiry_epoch, log_messages = me_handler.generate_MSI_token()
if msi_token_generated:
hutil_log("Successfully refreshed metrics-extension MSI Auth token.")
else:
hutil_error(log_messages)
telegraf_restart_retries = 0
me_restart_retries = 0
max_restart_retries = 10
# Check if telegraf is running, if not, then restart
if not telhandler.is_running(is_lad=False):
if telegraf_restart_retries < max_restart_retries:
telegraf_restart_retries += 1
hutil_log("Telegraf binary process is not running. Restarting telegraf now. Retry count - {0}".format(telegraf_restart_retries))
tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)
if tel_out:
hutil_log(tel_msg)
else:
hutil_error(tel_msg)
start_telegraf_out, log_messages = telhandler.start_telegraf(is_lad=False)
if start_telegraf_out:
hutil_log("Successfully started metrics-sourcer.")
else:
hutil_error(log_messages)
else:
hutil_error("Telegraf binary process is not running. Failed to restart after {0} retries. Please check telegraf.log".format(max_restart_retries))
else:
telegraf_restart_retries = 0
# Check if ME is running, if not, then restart
if not me_handler.is_running(is_lad=False):
if me_restart_retries < max_restart_retries:
me_restart_retries += 1
hutil_log("MetricsExtension binary process is not running. Restarting MetricsExtension now. Retry count - {0}".format(me_restart_retries))
me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)
if me_out:
hutil_log(me_msg)
else:
hutil_error(me_msg)
start_metrics_out, log_messages = me_handler.start_metrics(is_lad=False)
if start_metrics_out:
hutil_log("Successfully started metrics-extension.")
else:
hutil_error(log_messages)
else:
hutil_error("MetricsExtension binary process is not running. Failed to restart after {0} retries. Please check /var/log/syslog for ME logs".format(max_restart_retries))
else:
me_restart_retries = 0
except IOError as e:
hutil_error('I/O error in monitoring metrics. Exception={0}'.format(e))
except Exception as e:
hutil_error('Error in monitoring metrics. Exception={0}'.format(e))
finally:
time.sleep(sleepTime)
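# Summary of the loop above (added commentary, not in the original source):
# metrics_watcher() re-reads MdsdCounterJsonPath every 30 seconds and
#   * tears down telegraf/MetricsExtension when the counter list becomes empty,
#   * reconfigures and restarts both when the file's SHA-256 digest changes,
#   * refreshes the MSI auth token when it is missing or expires within 30 minutes,
#   * restarts telegraf or MetricsExtension (up to 10 attempts) if either stops running.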
def metrics():
"""
Take care of setting up telegraf and ME for metrics if configuration is present
"""
pids_filepath = os.path.join(os.getcwd(), 'amametrics.pid')
py_pid = os.getpid()
with open(pids_filepath, 'w') as f:
f.write(str(py_pid) + '\n')
watcher_thread = Thread(target = metrics_watcher, args = [hutil_log_error, hutil_log_info])
watcher_thread.start()
watcher_thread.join()
return 0, ""
def start_arc_process():
"""
Start arc process that performs periodic monitoring activities
:return: None
"""
hutil_log_info("stopping previously running arc process")
stop_arc_watcher()
hutil_log_info("starting arc process")
#start arc watcher
oneagent_filepath = os.path.join(os.getcwd(),'agent.py')
args = ['python{0}'.format(sys.version_info[0]), oneagent_filepath, '-arc']
log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')
hutil_log_info('start watcher process '+str(args))
subprocess.Popen(args, stdout=log, stderr=log)
def start_arc_watcher():
"""
Take care of starting arc_watcher daemon if the VM has arc running
"""
hutil_log_info("Starting the watcher")
print("Starting the watcher")
pids_filepath = os.path.join(os.getcwd(), 'amaarc.pid')
py_pid = os.getpid()
print("pid ", py_pid)
with open(pids_filepath, 'w') as f:
f.write(str(py_pid) + '\n')
hutil_log_info("Written all the pids")
print("Written all the pids")
watcher_thread = Thread(target = arc_watcher, args = [hutil_log_error, hutil_log_info])
watcher_thread.start()
watcher_thread.join()
return 0, ""
# Dictionary of operations strings to methods
operations = {'Disable' : disable,
'Uninstall' : uninstall,
'Install' : install,
'Enable' : enable,
'Update' : update,
'Metrics' : metrics,
'Arc' : start_arc_watcher,
}
def stop_arc_watcher():
"""
Take care of stopping arc_watcher daemon if the VM has arc running
"""
pids_filepath = os.path.join(os.getcwd(),'amaarc.pid')
# kill existing arc watcher
if os.path.exists(pids_filepath):
with open(pids_filepath, "r") as f:
for pids in f.readlines():
proc = subprocess.Popen(["ps -o cmd= {0}".format(pids)], stdout=subprocess.PIPE, shell=True)
                output = proc.communicate()[0].decode('utf-8', 'ignore')  # decode bytes so the substring check below works on python 3
if "arc" in output:
kill_cmd = "kill " + pids
run_command_and_log(kill_cmd)
# Delete the file after to avoid clutter
os.remove(pids_filepath)
def arc_watcher(hutil_error, hutil_log):
"""
This is needed to override mdsd's syslog permissions restriction which prevents mdsd
from reading temporary key files that are needed to make https calls to get an MSI token for arc during onboarding to download amcs config
This method spins up a process that will continuously keep refreshing that particular file path with valid keys
So that whenever mdsd needs to refresh it's MSI token, it is able to find correct keys there to make the https calls
"""
# check every 25 seconds
sleepTime = 25
# sleep before starting the monitoring.
time.sleep(sleepTime)
while True:
try:
arc_token_mdsd_dir = "/etc/mdsd.d/arc_tokens/"
if not os.path.exists(arc_token_mdsd_dir):
os.makedirs(arc_token_mdsd_dir)
else:
# delete the existing keys as they might not be valid anymore
for filename in os.listdir(arc_token_mdsd_dir):
filepath = arc_token_mdsd_dir + filename
os.remove(filepath)
arc_endpoint = metrics_utils.get_arc_endpoint()
try:
msiauthurl = arc_endpoint + "/metadata/identity/oauth2/token?api-version=2019-11-01&resource=https://monitor.azure.com/"
req = urllib.request.Request(msiauthurl, headers={'Metadata':'true'})
res = urllib.request.urlopen(req)
except:
# The above request is expected to fail and add a key to the path -
authkey_dir = "/var/opt/azcmagent/tokens/"
if not os.path.exists(authkey_dir):
raise Exception("Unable to find the auth key file at {0} returned from the arc msi auth request.".format(authkey_dir))
# Copy the tokens to mdsd accessible dir
for filename in os.listdir(authkey_dir):
filepath = authkey_dir + filename
print(filepath)
shutil.copy(filepath, arc_token_mdsd_dir)
# Change the ownership of the mdsd arc token dir to be accessible by syslog (since mdsd runs as syslog user)
os.system("chown -R syslog:syslog {0}".format(arc_token_mdsd_dir))
except Exception as e:
hutil_error('Error in arc watcher process while copying token for arc MSI auth queries. Exception={0}'.format(e))
finally:
time.sleep(sleepTime)
def parse_context(operation):
"""
Initialize a HandlerUtil object for this operation.
If the required modules have not been imported, this will return None.
"""
hutil = None
if ('Utils.WAAgentUtil' in sys.modules
and 'Utils.HandlerUtil' in sys.modules):
try:
logFileName = 'extension.log'
hutil = HUtil.HandlerUtility(waagent.Log, waagent.Error, logFileName=logFileName)
hutil.do_parse_context(operation)
# parse_context may throw KeyError if necessary JSON key is not
# present in settings
except KeyError as e:
waagent_log_error('Unable to parse context with error: ' \
'{0}'.format(e))
raise ParameterMissingException
return hutil
def find_package_manager(operation):
"""
Checks if the dist is debian based or centos based and assigns the package manager accordingly
"""
global PackageManager
global PackageManagerOptions
global BundleFileName
dist, ver = find_vm_distro(operation)
dpkg_set = set(["debian", "ubuntu"])
rpm_set = set(["oracle", "redhat", "centos", "red hat", "suse", "sles"])
for dpkg_dist in dpkg_set:
if dist.lower().startswith(dpkg_dist):
PackageManager = "dpkg"
# OK to replace the /etc/default/mdsd, since the placeholders gets replaced again.
# Otherwise, the package manager prompts for action (Y/I/N/O/D/Z) [default=N]
PackageManagerOptions = "--force-overwrite --force-confnew"
BundleFileName = BundleFileNameDeb
break
for rpm_dist in rpm_set:
if dist.lower().startswith(rpm_dist):
PackageManager = "rpm"
# Same as above.
PackageManagerOptions = "--force"
BundleFileName = BundleFileNameRpm
break
if PackageManager == "":
log_and_exit(operation, UnsupportedOperatingSystem, "The OS has neither rpm nor dpkg" )
def find_vm_distro(operation):
"""
Finds the Linux Distribution this vm is running on.
"""
vm_dist = vm_id = vm_ver = None
parse_manually = False
try:
vm_dist, vm_ver, vm_id = platform.linux_distribution()
except AttributeError:
try:
vm_dist, vm_ver, vm_id = platform.dist()
except AttributeError:
hutil_log_info("Falling back to /etc/os-release distribution parsing")
# Some python versions *IF BUILT LOCALLY* (ex 3.5) give string responses (ex. 'bullseye/sid') to platform.dist() function
# This causes exception in the method below. Thus adding a check to switch to manual parsing in this case
try:
temp_vm_ver = int(vm_ver.split('.')[0])
except:
parse_manually = True
if (not vm_dist and not vm_ver) or parse_manually: # SLES 15 and others
try:
with open('/etc/os-release', 'r') as fp:
for line in fp:
if line.startswith('ID='):
vm_dist = line.split('=')[1]
vm_dist = vm_dist.split('-')[0]
vm_dist = vm_dist.replace('\"', '').replace('\n', '')
elif line.startswith('VERSION_ID='):
vm_ver = line.split('=')[1]
vm_ver = vm_ver.replace('\"', '').replace('\n', '')
except:
log_and_exit(operation, IndeterminateOperatingSystem, 'Indeterminate operating system')
return vm_dist, vm_ver
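# Illustrative example (added commentary) of the fallback parsing above: an
# /etc/os-release containing
#   ID="sles"
#   VERSION_ID="15.2"
# yields vm_dist='sles' and vm_ver='15.2', the same (dist, version) shape the
# callers below expect.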
def is_vm_supported_for_extension(operation):
"""
Checks if the VM this extension is running on is supported by AzureMonitorAgent
Returns for platform.linux_distribution() vary widely in format, such as
'7.3.1611' returned for a VM with CentOS 7, so the first provided
digits must match
The supported distros of the AzureMonitorLinuxAgent are allowed to utilize
this VM extension. All other distros will get error code 51
"""
supported_dists = {'redhat' : ['6', '7', '8'], # Rhel
'centos' : ['6', '7', '8'], # CentOS
'red hat' : ['6', '7', '8'], # Oracle, RHEL
'oracle' : ['6', '7', '8'], # Oracle
'debian' : ['8', '9', '10'], # Debian
'ubuntu' : ['14.04', '16.04', '18.04', '20.04'], # Ubuntu
'suse' : ['12'], 'sles' : ['15'] # SLES
}
vm_supported = False
vm_dist, vm_ver = find_vm_distro(operation)
# Find this VM distribution in the supported list
for supported_dist in list(supported_dists.keys()):
if not vm_dist.lower().startswith(supported_dist):
continue
# Check if this VM distribution version is supported
vm_ver_split = vm_ver.split('.')
for supported_ver in supported_dists[supported_dist]:
supported_ver_split = supported_ver.split('.')
# If vm_ver is at least as precise (at least as many digits) as
# supported_ver and matches all the supported_ver digits, then
# this VM is guaranteed to be supported
vm_ver_match = True
for idx, supported_ver_num in enumerate(supported_ver_split):
try:
supported_ver_num = int(supported_ver_num)
vm_ver_num = int(vm_ver_split[idx])
except IndexError:
vm_ver_match = False
break
if vm_ver_num != supported_ver_num:
vm_ver_match = False
break
if vm_ver_match:
vm_supported = True
break
if vm_supported:
break
return vm_supported, vm_dist, vm_ver
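# Illustrative example (added commentary) of the matching logic above: with
# supported_dists['centos'] == ['6', '7', '8'], a VM reporting
# ('CentOS Linux', '7.3.1611') is supported because the leading digit '7'
# matches, while ('CentOS Linux', '5.11') is not. For Ubuntu, '18.04.5'
# matches the supported '18.04' because both listed components agree.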
def exit_if_vm_not_supported(operation):
"""
Check if this VM distro and version are supported by the AzureMonitorLinuxAgent.
If VM is supported, find the package manager present in this distro
If this VM is not supported, log the proper error code and exit.
"""
vm_supported, vm_dist, vm_ver = is_vm_supported_for_extension(operation)
if not vm_supported:
log_and_exit(operation, UnsupportedOperatingSystem, 'Unsupported operating system: ' \
'{0} {1}'.format(vm_dist, vm_ver))
return 0
def run_command_and_log(cmd, check_error = True, log_cmd = True):
"""
Run the provided shell command and log its output, including stdout and
stderr.
The output should not contain any PII, but the command might. In this case,
log_cmd should be set to False.
"""
exit_code, output = run_get_output(cmd, check_error, log_cmd)
if log_cmd:
hutil_log_info('Output of command "{0}": \n{1}'.format(cmd.rstrip(), output))
else:
hutil_log_info('Output: \n{0}'.format(output))
# also write output to STDERR since WA agent uploads that to Azlinux Kusto DB
# take only the last 100 characters as extension cuts off after that
try:
if exit_code != 0:
sys.stderr.write(output[-500:])
if "Permission denied" in output:
# Enable failures
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 52
except:
hutil_log_info('Failed to write output to STDERR')
return exit_code, output
def run_command_with_retries_output(cmd, retries, retry_check, final_check = None,
check_error = True, log_cmd = True,
initial_sleep_time = InitialRetrySleepSeconds,
sleep_increase_factor = 1):
"""
Caller provides a method, retry_check, to use to determine if a retry
should be performed. This must be a function with two parameters:
exit_code and output
The final_check can be provided as a method to perform a final check after
retries have been exhausted
Logic used: will retry up to retries times with initial_sleep_time in
between tries
    If the retry_check returns True for retry_verbosely, we will try cmd with
the standard -v verbose flag added
"""
try_count = 0
sleep_time = initial_sleep_time
run_cmd = cmd
run_verbosely = False
while try_count <= retries:
if run_verbosely:
run_cmd = cmd + ' -v'
exit_code, output = run_command_and_log(run_cmd, check_error, log_cmd)
should_retry, retry_message, run_verbosely = retry_check(exit_code,
output)
if not should_retry:
break
try_count += 1
hutil_log_info(retry_message)
time.sleep(sleep_time)
sleep_time *= sleep_increase_factor
if final_check is not None:
exit_code = final_check(exit_code, output)
return exit_code, output
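# Illustrative call (a sketch mirroring how install() uses this helper; the
# bundle path is a placeholder):
#   exit_code, output = run_command_with_retries_output(
#       'dpkg --force-overwrite --force-confnew -i <bundle_path>', retries=15,
#       retry_check=retry_if_dpkg_locked,
#       final_check=final_check_if_dpkg_locked)
# Each failed attempt sleeps InitialRetrySleepSeconds (optionally growing by
# sleep_increase_factor) before retrying.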
def is_dpkg_locked(exit_code, output):
"""
If dpkg is locked, the output will contain a message similar to 'dpkg
status database is locked by another process'
"""
if exit_code != 0:
dpkg_locked_search = r'^.*dpkg.+lock.*$'
dpkg_locked_re = re.compile(dpkg_locked_search, re.M)
if dpkg_locked_re.search(output):
return True
return False
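# Illustrative example (added commentary): with a non-zero exit code, output
# such as
#   "E: Could not get lock /var/lib/dpkg/lock - open (11: Resource temporarily unavailable)"
# matches the regex above ('dpkg' followed later by 'lock' on one line), so the
# calling command is treated as retryable.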
def retry_if_dpkg_locked(exit_code, output):
"""
Some commands fail because the package manager is locked (apt-get/dpkg
only); this will allow retries on failing commands.
"""
retry_verbosely = False
dpkg_locked = is_dpkg_locked(exit_code, output)
apt_get_exit_code, apt_get_output = run_get_output('which apt-get',
chk_err = False,
log_cmd = False)
if dpkg_locked:
return True, 'Retrying command because package manager is locked.', \
retry_verbosely
else:
return False, '', False
def final_check_if_dpkg_locked(exit_code, output):
"""
If dpkg is still locked after the retries, we want to return a specific
error code
"""
dpkg_locked = is_dpkg_locked(exit_code, output)
if dpkg_locked:
exit_code = DPKGLockedErrorCode
return exit_code
def get_settings():
"""
Retrieve the configuration for this extension operation
"""
global SettingsDict
public_settings = None
protected_settings = None
if HUtilObject is not None:
public_settings = HUtilObject.get_public_settings()
protected_settings = HUtilObject.get_protected_settings()
elif SettingsDict is not None:
public_settings = SettingsDict['public_settings']
protected_settings = SettingsDict['protected_settings']
else:
SettingsDict = {}
handler_env = get_handler_env()
try:
config_dir = str(handler_env['handlerEnvironment']['configFolder'])
except:
config_dir = os.path.join(os.getcwd(), 'config')
seq_no = get_latest_seq_no()
settings_path = os.path.join(config_dir, '{0}.settings'.format(seq_no))
try:
with open(settings_path, 'r') as settings_file:
settings_txt = settings_file.read()
settings = json.loads(settings_txt)
h_settings = settings['runtimeSettings'][0]['handlerSettings']
public_settings = h_settings['publicSettings']
SettingsDict['public_settings'] = public_settings
except:
hutil_log_error('Unable to load handler settings from ' \
'{0}'.format(settings_path))
if ('protectedSettings' in h_settings
and 'protectedSettingsCertThumbprint' in h_settings
and h_settings['protectedSettings'] is not None
and h_settings['protectedSettingsCertThumbprint'] is not None):
encoded_settings = h_settings['protectedSettings']
settings_thumbprint = h_settings['protectedSettingsCertThumbprint']
encoded_cert_path = os.path.join('/var/lib/waagent',
'{0}.crt'.format(
settings_thumbprint))
encoded_key_path = os.path.join('/var/lib/waagent',
'{0}.prv'.format(
settings_thumbprint))
decoded_settings = base64.standard_b64decode(encoded_settings)
decrypt_cmd = 'openssl smime -inform DER -decrypt -recip {0} ' \
'-inkey {1}'.format(encoded_cert_path,
encoded_key_path)
try:
session = subprocess.Popen([decrypt_cmd], shell = True,
stdin = subprocess.PIPE,
stderr = subprocess.STDOUT,
stdout = subprocess.PIPE)
output = session.communicate(decoded_settings)
except OSError:
pass
protected_settings_str = output[0]
if protected_settings_str is None:
log_and_exit('Enable', 1, 'Failed decrypting ' \
'protectedSettings')
protected_settings = ''
try:
protected_settings = json.loads(protected_settings_str)
except:
hutil_log_error('JSON exception decoding protected settings')
SettingsDict['protected_settings'] = protected_settings
return public_settings, protected_settings
def update_status_file(operation, exit_code, exit_status, message):
"""
Mimic HandlerUtil method do_status_report in case hutil method is not
available
Write status to status file
"""
handler_env = get_handler_env()
try:
extension_version = str(handler_env['version'])
status_dir = str(handler_env['handlerEnvironment']['statusFolder'])
except:
extension_version = "1.0"
status_dir = os.path.join(os.getcwd(), 'status')
status_txt = [{
"version" : extension_version,
"timestampUTC" : time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
"status" : {
"name" : "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent",
"operation" : operation,
"status" : exit_status,
"code" : exit_code,
"formattedMessage" : {
"lang" : "en-US",
"message" : message
}
}
}]
status_json = json.dumps(status_txt)
# Find the most recently changed config file and then use the
# corresponding status file
latest_seq_no = get_latest_seq_no()
status_path = os.path.join(status_dir, '{0}.status'.format(latest_seq_no))
status_tmp = '{0}.tmp'.format(status_path)
with open(status_tmp, 'w+') as tmp_file:
tmp_file.write(status_json)
os.rename(status_tmp, status_path)
def get_handler_env():
"""
Set and retrieve the contents of HandlerEnvironment.json as JSON
"""
global HandlerEnvironment
if HandlerEnvironment is None:
handler_env_path = os.path.join(os.getcwd(), 'HandlerEnvironment.json')
try:
with open(handler_env_path, 'r') as handler_env_file:
handler_env_txt = handler_env_file.read()
handler_env = json.loads(handler_env_txt)
if type(handler_env) == list:
handler_env = handler_env[0]
HandlerEnvironment = handler_env
except Exception as e:
waagent_log_error(str(e))
return HandlerEnvironment
def get_latest_seq_no():
"""
Determine the latest operation settings number to use
"""
global SettingsSequenceNumber
if SettingsSequenceNumber is None:
handler_env = get_handler_env()
try:
config_dir = str(handler_env['handlerEnvironment']['configFolder'])
except:
config_dir = os.path.join(os.getcwd(), 'config')
latest_seq_no = -1
cur_seq_no = -1
latest_time = None
try:
for dir_name, sub_dirs, file_names in os.walk(config_dir):
for file_name in file_names:
file_basename = os.path.basename(file_name)
match = re.match(r'[0-9]{1,10}\.settings', file_basename)
if match is None:
continue
cur_seq_no = int(file_basename.split('.')[0])
file_path = os.path.join(config_dir, file_name)
cur_time = os.path.getmtime(file_path)
if latest_time is None or cur_time > latest_time:
latest_time = cur_time
latest_seq_no = cur_seq_no
except:
pass
if latest_seq_no < 0:
latest_seq_no = 0
SettingsSequenceNumber = latest_seq_no
return SettingsSequenceNumber
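# Illustrative example (added commentary): if the config folder holds
# 0.settings, 1.settings and 2.settings, the function returns the sequence
# number of whichever file was modified most recently (not necessarily 2),
# and caches it in SettingsSequenceNumber for later calls.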
def run_get_output(cmd, chk_err = False, log_cmd = True):
"""
    Mimic the waagent method RunGetOutput in case waagent is not available
Run shell command and return exit code and output
"""
if 'Utils.WAAgentUtil' in sys.modules:
# WALinuxAgent-2.0.14 allows only 2 parameters for RunGetOutput
# If checking the number of parameters fails, pass 2
try:
sig = inspect.signature(waagent.RunGetOutput)
params = sig.parameters
waagent_params = len(params)
except:
try:
spec = inspect.getargspec(waagent.RunGetOutput)
params = spec.args
waagent_params = len(params)
except:
waagent_params = 2
if waagent_params >= 3:
exit_code, output = waagent.RunGetOutput(cmd, chk_err, log_cmd)
else:
exit_code, output = waagent.RunGetOutput(cmd, chk_err)
else:
try:
output = subprocess.check_output(cmd, stderr = subprocess.STDOUT,
shell = True)
exit_code = 0
except subprocess.CalledProcessError as e:
exit_code = e.returncode
            output = e.output  # bytes on python 3; decoded below together with the success path
# On python 3, encode returns a byte object, so we must decode back to a string
if sys.version_info >= (3,):
output = output.decode('utf-8', 'ignore')
return exit_code, output.strip()
def init_waagent_logger():
"""
Initialize waagent logger
If waagent has not been imported, catch the exception
"""
try:
waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout', True)
except Exception as e:
print('Unable to initialize waagent log because of exception ' \
'{0}'.format(e))
def waagent_log_info(message):
"""
Log informational message, being cautious of possibility that waagent may
not be imported
"""
if 'Utils.WAAgentUtil' in sys.modules:
waagent.Log(message)
else:
print('Info: {0}'.format(message))
def waagent_log_error(message):
"""
Log error message, being cautious of possibility that waagent may not be
imported
"""
if 'Utils.WAAgentUtil' in sys.modules:
waagent.Error(message)
else:
print('Error: {0}'.format(message))
def hutil_log_info(message):
"""
Log informational message, being cautious of possibility that hutil may
not be imported and configured
"""
if HUtilObject is not None:
HUtilObject.log(message)
else:
print('Info: {0}'.format(message))
def hutil_log_error(message):
"""
Log error message, being cautious of possibility that hutil may not be
imported and configured
"""
if HUtilObject is not None:
HUtilObject.error(message)
else:
print('Error: {0}'.format(message))
def log_and_exit(operation, exit_code = 1, message = ''):
"""
Log the exit message and perform the exit
"""
if exit_code == 0:
waagent_log_info(message)
hutil_log_info(message)
exit_status = 'success'
else:
waagent_log_error(message)
hutil_log_error(message)
exit_status = 'failed'
if HUtilObject is not None:
HUtilObject.do_exit(exit_code, operation, exit_status, str(exit_code),
message)
else:
update_status_file(operation, str(exit_code), exit_status, message)
sys.exit(exit_code)
# Exceptions
# If these exceptions are expected to be caught by the main method, they
# include an error_code field with an integer with which to exit from main
class AzureMonitorAgentForLinuxException(Exception):
"""
Base exception class for all exceptions; as such, its error code is the
basic error code traditionally returned in Linux: 1
"""
error_code = 1
def get_error_message(self, operation):
"""
Return a descriptive error message based on this type of exception
"""
return '{0} failed with exit code {1}'.format(operation,
self.error_code)
class ParameterMissingException(AzureMonitorAgentForLinuxException):
"""
There is a missing parameter for the AzureMonitorLinuxAgent Extension
"""
error_code = MissingorInvalidParameterErrorCode
def get_error_message(self, operation):
return '{0} failed due to a missing parameter: {1}'.format(operation,
self)
if __name__ == '__main__' :
main()
|
test.py
|
from iUSBDAQ import iUSBDAQ
import numpy as np
import matplotlib.pyplot as plt
from threading import Thread, Lock, Event
import time
bufflen = 1000
NCHANNELS = 8  # assumed channel count for the iUSBDAQ device; adjust to match the hardware
buff = np.zeros((bufflen, NCHANNELS), dtype=float)  # np.float was removed in newer numpy releases
buffidx = 0
def read(lock, stop):
global buff, buffidx
dev = iUSBDAQ(0)
dev.AIStartStream()
while not stop.isSet():
dev.AIGetScans()
with lock:
buff[buffidx,:] = dev.buff[-NCHANNELS:]
buffidx = (buffidx + 1) % bufflen
dev.AIStopStream()
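# Added commentary: read() above fills `buff` as a ring buffer under `lock`,
# advancing `buffidx` modulo bufflen, while draw() below repaints the same
# buffer and marks the current write position with the vertical `bar` line.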
def draw(lock, stop):
plt.ion()
plt.figure()
lines = plt.plot(buff)
# ratio = plt.plot(buff[:,1])[0]
bar = plt.plot([0,0],[0,5])[0]
# plt.ylim([2.5,2.6])
plt.ylim([0,5])
plt.ylabel('Voltage')
while not stop.isSet():
with lock:
for i, line in enumerate(lines):
line.set_ydata(buff[:,i])
# ratio.set_ydata(buff[:,1] * 2.533 / buff[:,2])
bar.set_xdata([buffidx, buffidx])
# rat = buff[:,1] + 2.580 - buff[:,2]
rat = buff[:,1]
avg = np.mean(rat)
rmse = np.sqrt(np.mean(np.power(rat - avg, 2)))
vpp = np.max(rat) - np.min(rat)
            print(avg, rmse, vpp)
plt.draw()
plt.close()
if __name__ == '__main__':
assert iUSBDAQ.EnumerateDev() >= 1
lock = Lock()
stop = Event()
reader = Thread(target=read, args=(lock, stop))
drawer = Thread(target=draw, args=(lock, stop))
reader.start()
drawer.start()
time.sleep(30)
stop.set()
reader.join()
drawer.join()
|
utils.py
|
"""Utilities shared by tests."""
import asyncio
import collections
import contextlib
import io
import logging
import os
import re
import selectors
import socket
import socketserver
import sys
import tempfile
import threading
import time
import unittest
import weakref
from unittest import mock
from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from asyncio import base_events
from asyncio import events
from asyncio import format_helpers
from asyncio import futures
from asyncio import tasks
from asyncio.log import logger
from test import support
def data_file(filename):
if hasattr(support, 'TEST_HOME_DIR'):
fullname = os.path.join(support.TEST_HOME_DIR, filename)
if os.path.isfile(fullname):
return fullname
fullname = os.path.join(os.path.dirname(__file__), '..', filename)
if os.path.isfile(fullname):
return fullname
raise FileNotFoundError(filename)
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
PEERCERT = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Jul 7 14:23:16 2028 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
def simple_server_sslcontext():
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(ONLYCERT, ONLYKEY)
server_context.check_hostname = False
server_context.verify_mode = ssl.CERT_NONE
return server_context
def simple_client_sslcontext(*, disable_verify=True):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.check_hostname = False
if disable_verify:
client_context.verify_mode = ssl.CERT_NONE
return client_context
def dummy_ssl_context():
if ssl is None:
return None
else:
return ssl.SSLContext(ssl.PROTOCOL_TLS)
def run_briefly(loop):
async def once():
pass
gen = once()
t = loop.create_task(gen)
# Don't log a warning if the task is not done after run_until_complete().
# It occurs if the loop is stopped or if a task raises a BaseException.
t._log_destroy_pending = False
try:
loop.run_until_complete(t)
finally:
gen.close()
def run_until(loop, pred, timeout=30):
deadline = time.monotonic() + timeout
while not pred():
if timeout is not None:
timeout = deadline - time.monotonic()
if timeout <= 0:
raise futures.TimeoutError()
loop.run_until_complete(tasks.sleep(0.001))
def run_once(loop):
"""Legacy API to run once through the event loop.
This is the recommended pattern for test code. It will poll the
selector once and run all callbacks scheduled in response to I/O
events.
"""
loop.call_soon(loop.stop)
loop.run_forever()
class SilentWSGIRequestHandler(WSGIRequestHandler):
def get_stderr(self):
return io.StringIO()
def log_message(self, format, *args):
pass
class SilentWSGIServer(WSGIServer):
request_timeout = support.LOOPBACK_TIMEOUT
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
return request, client_addr
def handle_error(self, request, client_address):
pass
class SSLWSGIServerMixin:
def finish_request(self, request, client_address):
# The relative location of our test directory (which
# contains the ssl key and certificate files) differs
# between the stdlib and stand-alone asyncio.
# Prefer our own if we can find it.
context = ssl.SSLContext()
context.load_cert_chain(ONLYCERT, ONLYKEY)
ssock = context.wrap_socket(request, server_side=True)
try:
self.RequestHandlerClass(ssock, client_address, self)
ssock.close()
except OSError:
# maybe socket has been closed by peer
pass
class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
pass
def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
def loop(environ):
size = int(environ['CONTENT_LENGTH'])
while size:
data = environ['wsgi.input'].read(min(size, 0x10000))
yield data
size -= len(data)
def app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'text/plain')]
start_response(status, headers)
if environ['PATH_INFO'] == '/loop':
return loop(environ)
else:
return [b'Test message']
# Run the test WSGI server in a separate thread in order not to
# interfere with event handling in the main thread
server_class = server_ssl_cls if use_ssl else server_cls
httpd = server_class(address, SilentWSGIRequestHandler)
httpd.set_app(app)
httpd.address = httpd.server_address
server_thread = threading.Thread(
target=lambda: httpd.serve_forever(poll_interval=0.05))
server_thread.start()
try:
yield httpd
finally:
httpd.shutdown()
httpd.server_close()
server_thread.join()
if hasattr(socket, 'AF_UNIX'):
class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):
def server_bind(self):
socketserver.UnixStreamServer.server_bind(self)
self.server_name = '127.0.0.1'
self.server_port = 80
class UnixWSGIServer(UnixHTTPServer, WSGIServer):
request_timeout = support.LOOPBACK_TIMEOUT
def server_bind(self):
UnixHTTPServer.server_bind(self)
self.setup_environ()
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
# Code in the stdlib expects that get_request
# will return a socket and a tuple (host, port).
# However, this isn't true for UNIX sockets,
# as the second return value will be a path;
# hence we return some fake data sufficient
# to get the tests going
return request, ('127.0.0.1', '')
class SilentUnixWSGIServer(UnixWSGIServer):
def handle_error(self, request, client_address):
pass
class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
pass
def gen_unix_socket_path():
with tempfile.NamedTemporaryFile() as file:
return file.name
@contextlib.contextmanager
def unix_socket_path():
path = gen_unix_socket_path()
try:
yield path
finally:
try:
os.unlink(path)
except OSError:
pass
@contextlib.contextmanager
def run_test_unix_server(*, use_ssl=False):
with unix_socket_path() as path:
yield from _run_test_server(address=path, use_ssl=use_ssl,
server_cls=SilentUnixWSGIServer,
server_ssl_cls=UnixSSLWSGIServer)
@contextlib.contextmanager
def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
server_cls=SilentWSGIServer,
server_ssl_cls=SSLWSGIServer)
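# Illustrative usage (a sketch, not from the original file):
#   with run_test_server(use_ssl=False) as httpd:
#       host, port = httpd.address
#       # point an asyncio client at http://host:port/ or the /loop endpoint
# The server runs in a background thread and is shut down when the context
# manager exits.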
def make_test_protocol(base):
dct = {}
for name in dir(base):
if name.startswith('__') and name.endswith('__'):
# skip magic names
continue
dct[name] = MockCallback(return_value=None)
return type('TestProtocol', (base,) + base.__bases__, dct)()
class TestSelector(selectors.BaseSelector):
def __init__(self):
self.keys = {}
def register(self, fileobj, events, data=None):
key = selectors.SelectorKey(fileobj, 0, events, data)
self.keys[fileobj] = key
return key
def unregister(self, fileobj):
return self.keys.pop(fileobj)
def select(self, timeout):
return []
def get_map(self):
return self.keys
class TestLoop(base_events.BaseEventLoop):
"""Loop for unittests.
    It manages its own (virtual) time directly.
    If something is scheduled to be executed later, then on the next loop
    iteration, after all ready handlers are done, the generator passed to
    __init__ is called.
    The generator should look like this:
def gen():
...
when = yield ...
... = yield time_advance
Value returned by yield is absolute time of next scheduled handler.
Value passed to yield is time advance to move loop's time forward.
"""
def __init__(self, gen=None):
super().__init__()
if gen is None:
def gen():
yield
self._check_on_close = False
else:
self._check_on_close = True
self._gen = gen()
next(self._gen)
self._time = 0
self._clock_resolution = 1e-9
self._timers = []
self._selector = TestSelector()
self.readers = {}
self.writers = {}
self.reset_counters()
self._transports = weakref.WeakValueDictionary()
def time(self):
return self._time
def advance_time(self, advance):
"""Move test time forward."""
if advance:
self._time += advance
def close(self):
super().close()
if self._check_on_close:
try:
self._gen.send(0)
except StopIteration:
pass
else: # pragma: no cover
raise AssertionError("Time generator is not finished")
def _add_reader(self, fd, callback, *args):
self.readers[fd] = events.Handle(callback, args, self, None)
def _remove_reader(self, fd):
self.remove_reader_count[fd] += 1
if fd in self.readers:
del self.readers[fd]
return True
else:
return False
def assert_reader(self, fd, callback, *args):
if fd not in self.readers:
raise AssertionError(f'fd {fd} is not registered')
handle = self.readers[fd]
if handle._callback != callback:
raise AssertionError(
f'unexpected callback: {handle._callback} != {callback}')
if handle._args != args:
raise AssertionError(
f'unexpected callback args: {handle._args} != {args}')
def assert_no_reader(self, fd):
if fd in self.readers:
raise AssertionError(f'fd {fd} is registered')
def _add_writer(self, fd, callback, *args):
self.writers[fd] = events.Handle(callback, args, self, None)
def _remove_writer(self, fd):
self.remove_writer_count[fd] += 1
if fd in self.writers:
del self.writers[fd]
return True
else:
return False
def assert_writer(self, fd, callback, *args):
assert fd in self.writers, 'fd {} is not registered'.format(fd)
handle = self.writers[fd]
assert handle._callback == callback, '{!r} != {!r}'.format(
handle._callback, callback)
assert handle._args == args, '{!r} != {!r}'.format(
handle._args, args)
def _ensure_fd_no_transport(self, fd):
if not isinstance(fd, int):
try:
fd = int(fd.fileno())
except (AttributeError, TypeError, ValueError):
# This code matches selectors._fileobj_to_fd function.
raise ValueError("Invalid file object: "
"{!r}".format(fd)) from None
try:
transport = self._transports[fd]
except KeyError:
pass
else:
raise RuntimeError(
'File descriptor {!r} is used by transport {!r}'.format(
fd, transport))
def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
self._ensure_fd_no_transport(fd)
return self._add_reader(fd, callback, *args)
def remove_reader(self, fd):
"""Remove a reader callback."""
self._ensure_fd_no_transport(fd)
return self._remove_reader(fd)
def add_writer(self, fd, callback, *args):
"""Add a writer callback.."""
self._ensure_fd_no_transport(fd)
return self._add_writer(fd, callback, *args)
def remove_writer(self, fd):
"""Remove a writer callback."""
self._ensure_fd_no_transport(fd)
return self._remove_writer(fd)
def reset_counters(self):
self.remove_reader_count = collections.defaultdict(int)
self.remove_writer_count = collections.defaultdict(int)
def _run_once(self):
super()._run_once()
for when in self._timers:
advance = self._gen.send(when)
self.advance_time(advance)
self._timers = []
def call_at(self, when, callback, *args, context=None):
self._timers.append(when)
return super().call_at(when, callback, *args, context=context)
def _process_events(self, event_list):
return
def _write_to_self(self):
pass
def MockCallback(**kwargs):
return mock.Mock(spec=['__call__'], **kwargs)
class MockPattern(str):
"""A regex based str with a fuzzy __eq__.
Use this helper with 'mock.assert_called_with', or anywhere
where a regex comparison between strings is needed.
For instance:
mock_call.assert_called_with(MockPattern('spam.*ham'))
"""
def __eq__(self, other):
return bool(re.search(str(self), other, re.S))
class MockInstanceOf:
def __init__(self, type):
self._type = type
def __eq__(self, other):
return isinstance(other, self._type)
def get_function_source(func):
source = format_helpers._get_function_source(func)
if source is None:
raise ValueError("unable to get the source of %r" % (func,))
return source
class TestCase(unittest.TestCase):
@staticmethod
def close_loop(loop):
if loop._default_executor is not None:
if not loop.is_closed():
loop.run_until_complete(loop.shutdown_default_executor())
else:
loop._default_executor.shutdown(wait=True)
loop.close()
policy = support.maybe_get_event_loop_policy()
if policy is not None:
try:
watcher = policy.get_child_watcher()
except NotImplementedError:
# watcher is not implemented by EventLoopPolicy, e.g. Windows
pass
else:
if isinstance(watcher, asyncio.ThreadedChildWatcher):
threads = list(watcher._threads.values())
for thread in threads:
thread.join()
def set_event_loop(self, loop, *, cleanup=True):
assert loop is not None
# ensure that the event loop is passed explicitly in asyncio
events.set_event_loop(None)
if cleanup:
self.addCleanup(self.close_loop, loop)
def new_test_loop(self, gen=None):
loop = TestLoop(gen)
self.set_event_loop(loop)
return loop
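    # Typical driving pattern (a sketch based on the TestLoop docstring above):
    # the generator receives the absolute time of the next scheduled callback
    # and yields how far to advance the loop's clock.
    #
    #     def gen():
    #         when = yield                 # absolute time of the next timer
    #         self.assertAlmostEqual(0.1, when)
    #         yield 0.1                    # advance the clock by 0.1 seconds
    #
    #     loop = self.new_test_loop(gen)
    #     loop.run_until_complete(asyncio.sleep(0.1))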
def unpatch_get_running_loop(self):
events._get_running_loop = self._get_running_loop
def setUp(self):
self._get_running_loop = events._get_running_loop
events._get_running_loop = lambda: None
self._thread_cleanup = support.threading_setup()
def tearDown(self):
self.unpatch_get_running_loop()
events.set_event_loop(None)
# Detect CPython bug #23353: ensure that yield/yield-from is not used
# in an except block of a generator
self.assertEqual(sys.exc_info(), (None, None, None))
self.doCleanups()
support.threading_cleanup(*self._thread_cleanup)
support.reap_children()
@contextlib.contextmanager
def disable_logger():
"""Context manager to disable asyncio logger.
For example, it can be used to ignore warnings in debug mode.
"""
old_level = logger.level
try:
logger.setLevel(logging.CRITICAL+1)
yield
finally:
logger.setLevel(old_level)
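# Usage sketch for disable_logger (the coroutine name is an assumption):
#
#     with disable_logger():
#         loop.run_until_complete(noisy_coroutine())  # asyncio log records are suppressed here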
def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM,
family=socket.AF_INET):
"""Create a mock of a non-blocking socket."""
sock = mock.MagicMock(socket.socket)
sock.proto = proto
sock.type = type
sock.family = family
sock.gettimeout.return_value = 0.0
return sock
|
ros2_raspicam_node.py
|
# Copyright 2021 Robert Adams
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import queue
import threading
import time
import traceback
import sys
import rclpy
from rclpy.parameter import Parameter
from rclpy.node import Node
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge
import cv2
class ROS2_raspicam_node(Node):
def __init__(self):
super().__init__('ros2_raspicam_node', namespace='raspicam')
self.set_parameter_defaults( [
('compressed_image', Parameter.Type.BOOL, False),
('image_topic', Parameter.Type.STRING, 'raspicam_uncompressed'),
('image_topic_qos', Parameter.Type.INTEGER, 10),
('compressed_image_topic', Parameter.Type.STRING, 'raspicam_compressed'),
('compressed_image_topic_qos', Parameter.Type.INTEGER, 10),
('camera_index', Parameter.Type.INTEGER, 0),
('camera_frame_rate', Parameter.Type.INTEGER, 2),
('camera_image_width', Parameter.Type.INTEGER, 640),
('camera_image_height', Parameter.Type.INTEGER, 480),
# Saturation: -100..100, default 0
('camera_awb_mode', Parameter.Type.BOOL, True),
('camera_wb_temp', Parameter.Type.INTEGER, 2700),
# ('camera_wb_red', Parameter.Type.INTEGER, 128),
# ('camera_wb_green', Parameter.Type.INTEGER, 128),
# ('camera_wb_blue', Parameter.Type.INTEGER, 128),
# brightness: 1..100, default 50
('camera_brightness', Parameter.Type.INTEGER, 55),
# Contrast: -100..100, default 0
('camera_contrast', Parameter.Type.INTEGER, 0),
('camera_hflip', Parameter.Type.BOOL, False),
('camera_vflip', Parameter.Type.BOOL, True),
            # ('camera_exif_copyright', Parameter.Type.STRING, 'Copyright 2018 MY NAME'),
# ('camera_user_comment', Parameter.Type.STRING, 'SOMETHING INFORMATIVE'),
            # Exposure compensation: -25..25, default 0, one step = 1/6 F-stop
# ('camera_exposure_compenstation', Parameter.Type.INTEGER, 0),
# off, auto, night, backlight, spotlight, sports, snow, beach, antishake, fireworks
# ('camera_exposure_mode', Parameter.Type.STRING, 'auto'),
# the camera is upside down in initial setup
# ('camera_hflip', Parameter.Type.BOOL, True),
# ('camera_vflip', Parameter.Type.BOOL, True),
# 'none', 'negative', 'solarize', 'sketch', 'denoise', 'emboss', 'oilpaint',
# 'hatch', 'gpen', 'pastel', 'watercolor', 'film', 'blur', 'saturation',
# 'colorswap', 'washedout', 'posterise', 'colorpoint', 'colorbalance', 'cartoon', 'deinterlace1',
# 'deinterlace2'
# ('camera_image_effect', Parameter.Type.STRING, 'none'),
# 'average' 'spot' 'backlit' 'matrix'
# ('camera_meter_mode', Parameter.Type.STRING, 'average'),
# 640/480, 800/600, 1280/720
# ('camera_saturation', Parameter.Type.INTEGER, 0),
# Sharpness: -100..100, default 0
# ('camera_sharpness', Parameter.Type.INTEGER, 10),
] )
self.keepRunning = True
self.camera = cv2.VideoCapture(self.get_parameter_value('camera_index'))
        time.sleep(1)  # let camera initialization complete
self.bridge = CvBridge()
self.initialize_publisher()
self.set_camera_parameters()
self.initialize_capture_queue()
def destroy_node(self):
        # Override of the Node function called when the class is being stopped and the camera needs closing
# if hasattr(self, 'publisher') and self.publisher != None:
# # nothing to do
if hasattr(self, 'camera') and self.camera != None:
self.camera.release()
super().destroy_node()
def initialize_publisher(self):
if self.get_parameter_value('compressed_image'):
self.publisher = self.create_publisher(CompressedImage,
self.get_parameter_value('compressed_image_topic'),
self.get_parameter_value('compressed_image_topic_qos') )
else:
self.publisher = self.create_publisher(Image,
self.get_parameter_value('image_topic'),
self.get_parameter_value('image_topic_qos') )
self.frame_num = 0
def set_camera_parameters(self):
self.camera.set(cv2.CAP_PROP_CONVERT_RGB, 1.0)
self.camera.set(cv2.CAP_PROP_FPS, self.get_parameter_value('camera_frame_rate'))
self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.get_parameter_value('camera_image_width'))
self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.get_parameter_value('camera_image_height'))
#if self.get_parameter_value('camera_awb_mode'):
# self.camera.set(cv2.CAP_PROP_AUTO_WB, 1.0)
#else:
# self.camera.set(cv2.CAP_PROP_AUTO_WB, 0.0)
# self.camera.set(cv2.CAP_PROP_TEMPERATURE, self.get_parameter_value('camera_temp'))
#self.camera.set(cv2.CAP_PROP_CONTRAST, self.get_parameter_value('camera_contrast'))
#self.camera.set(cv2.CAP_PROP_BRIGHTNESS, self.get_parameter_value('camera_brightness'))
# self.camera.brightness = self.get_parameter_value('camera_brightness')
# self.camera.contrast = self.get_parameter_value('camera_contrast')
# if self.has_parameter('camera_exif_copyright'):
# self.camera.exif_tage['IFDO.Copyright'] = self.get_parameter_value('camera_exif_copyright')
# if self.has_parameter('camera_exif_user_comment'):
# self.camera.exif_tage['EXIF.UserComment'] = self.get_parameter_value('camera_exif_user_comment')
# self.camera.exposure_compensation = self.get_parameter_value('camera_exposure_compenstation')
# self.camera.exposure_mode = self.get_parameter_value('camera_exposure_mode')
# self.camera.hflip = self.get_parameter_value('camera_hflip')
# self.camera.vflip = self.get_parameter_value('camera_vflip')
# self.camera.image_effect = self.get_parameter_value('camera_image_effect')
# self.camera.meter_mode = self.get_parameter_value('camera_meter_mode')
# self.image_width = self.get_parameter_value('camera_image_width')
# self.image_height = self.get_parameter_value('camera_image_height')
# self.camera.resolution = ( self.image_width, self.image_height )
# self.get_logger().debug('CAM: setting capture resolution = %s/%s'
# % (self.camera.resolution[0], self.camera.resolution[1]))
# self.camera.saturation = self.get_parameter_value('camera_saturation')
# self.camera.sharpness = self.get_parameter_value('camera_sharpness')
def initialize_capture_queue(self):
# Create a queue and two threads to capture and then push the images to the topic
self.queue_lock = threading.Lock()
self.capture_queue = queue.Queue()
# self.capture_queue = queue.SimpleQueue() # introduced in Python 3.7
# thread to capture camera images and place in queue
self.capture_event = threading.Event()
self.capturer_thread = threading.Thread(target=self.take_pictures, name='capturer')
# thread to read queue and send them to the topic
self.publisher_event = threading.Event()
self.publisher_thread = threading.Thread(target=self.publish_images, name='publisher')
self.capturer_thread.start()
self.publisher_thread.start()
def stop_workers(self):
# if workers are initialized and running, tell them to stop and wait until stopped
if hasattr(self, 'capture_event') and self.capture_event != None:
self.capture_event.set()
if hasattr(self, 'publisher_event') and self.publisher_event != None:
self.publisher_event.set()
if hasattr(self, 'publisher_thread') and self.publisher_thread.is_alive():
self.publisher_thread.join()
if hasattr(self, 'capturer_thread') and self.capturer_thread.is_alive():
self.capturer_thread.join()
def take_pictures(self):
# Take compressed images and put into the queue.
# 'jpeg', 'rgb'
# https://docs.opencv.org/3.4/d8/dfe/classcv_1_1VideoCapture.html
try:
while self.keepRunning:
ret, frame = self.camera.read()
if ret == True:
# Flip the image if requested
if self.get_parameter_value('camera_vflip'):
if self.get_parameter_value('camera_hflip'):
frame = cv2.flip(frame, -1)
else:
frame = cv2.flip(frame, 0)
else:
if self.get_parameter_value('camera_hflip'):
frame = cv2.flip(frame, 1)
if self.get_parameter_value('compressed_image'):
result, encimg = cv2.imencode('.jpg', frame)
if result == True:
self.write_compressed_capture(encimg, 'jpeg')
else:
# encimg = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# self.write_capture(encimg, 'rgb8')
self.write_capture(frame, 'bgr8')
time.sleep(0.5)
except Exception as err:
self.get_logger().error('take_pictures: exiting take_pictures because of exception')
self.get_logger().error(traceback.format_exc())
def write_capture(self, frame, fmt):
with self.queue_lock:
msg = self.bridge.cv2_to_imgmsg(frame, fmt)
# msg.encoding = fmt
# msg.height = self.get_parameter_value('camera_image_height')
# msg.width = self.get_parameter_value('camera_image_width')
# msg.step = 3 * msg.width
msg.header.frame_id = str(self.frame_num)
self.frame_num += 1
self.get_logger().debug('write_capture: capture frame. size=%s, frame=%s'
% (len(frame), msg.header.frame_id) )
# msg.header.stamp = time.Time
self.capture_queue.put(msg)
def write_compressed_capture(self, frame, fmt):
with self.queue_lock:
msg = CompressedImage()
            msg.data = np.array(frame).tobytes()  # tobytes() replaces numpy's deprecated tostring()
msg.format = fmt
msg.header.frame_id = str(self.frame_num)
self.frame_num += 1
self.get_logger().debug('write_compressed_capture: capture frame. size=%s, frame=%s'
% (len(frame), msg.header.frame_id) )
# msg.header.stamp = time.Time
self.capture_queue.put(msg)
def publish_images(self):
# Loop reading from capture queue and send to ROS topic
while True:
if self.publisher_event.is_set():
break
try:
msg = self.capture_queue.get(block=True, timeout=2)
except queue.Empty:
msg = None
if self.publisher_event.is_set():
break
if msg != None:
self.get_logger().debug('CAM: sending frame. frame=%s'
% (msg.header.frame_id) )
self.publisher.publish(msg)
def get_parameter_value(self, param):
# Helper function to return value of a parameter
ret = None
param_desc = self.get_parameter(param)
        if param_desc.type_ == Parameter.Type.NOT_SET:
raise Exception('Fetch of parameter that does not exist: ' + param)
else:
ret = param_desc.value
return ret
def set_parameter_defaults(self, params):
# If a parameter has not been set externally, set the value to a default.
# Passed a list of "(parameterName, parameterType, defaultValue)" tuples.
for (pparam, ptype, pdefault) in params:
if not self.has_parameter(pparam):
self.declare_parameter(pparam, pdefault)
def parameter_set_if_set(self, param, set_function):
# If there is a parameter set, do set_function with the value
if self.has_parameter(param):
set_function(self.get_parameter_value(param))
def main(args=None):
rclpy.init(args=args)
camNode = ROS2_raspicam_node()
try:
rclpy.spin(camNode)
except KeyboardInterrupt:
camNode.get_logger().info('CAM: Keyboard interrupt')
camNode.keepRunning = False
camNode.stop_workers()
camNode.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
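# A hedged launch sketch (the package/executable names below are assumptions):
#
#     ros2 run ros2_raspicam_node ros2_raspicam_node \
#         --ros-args -p compressed_image:=true -p camera_frame_rate:=5
#
# Any parameter declared in set_parameter_defaults() above (for example
# 'camera_index', 'camera_image_width', 'camera_vflip') can be overridden
# the same way at launch time.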
|
Main.py
|
from time import sleep
import json
import os
import psutil
import datetime
import subprocess
import threading
import shutil
from pysteamcmdwrapper import SteamCMD, SteamCMDException
# Clear Function for CMD
def clear(): return os.system('cls')
# This Loads the Config.json
with open("config.json", 'r') as f:
    config = json.load(f)
Max_Ram = config['max_ram']
Server_Path = config['Path_To_Server']
Max_System_Ram = config['max_system_ram']
Server_Folder = Server_Path.replace('deadmatterServer.exe', '')  # .strip() removes characters, not the trailing file name
SteamCMDInstall = config['Steam_CMD_Path']
RamRefresh = config['RamRefresh']
AutoUpdate = config['AutoUpdate']
Ram_Refresh_Timer = config['Ram_Clean_Timer']
Server_Check_Timer = config['Server_Check_Timer']
Skip_Menu = config['Skip_Window_To_Monitor']
Auto_Backup = config['Auto_Backups']
Auto_Backup_Timer = config['Auto_Backup_Time']
SteamUser = config['SteamUsername']
SteamPass = config['SteamPassword']
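# For reference, a sketch of the config.json this script reads (values are
# illustrative assumptions; the keys match the lookups above):
#
#     {
#         "max_ram": 40,
#         "max_system_ram": 90,
#         "Path_To_Server": "C:/DeadMatterServer/deadmatterServer.exe",
#         "Steam_CMD_Path": "C:/steamcmd",
#         "RamRefresh": true,
#         "AutoUpdate": false,
#         "Ram_Clean_Timer": 3600,
#         "Server_Check_Timer": 60,
#         "Skip_Window_To_Monitor": false,
#         "Auto_Backups": true,
#         "Auto_Backup_Time": 3600,
#         "SteamUsername": "",
#         "SteamPassword": ""
#     }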
# Global Vars
PID = 0
PID_Fallback = ''
NAME = ""
mem_per = 0
system_per = 0
# Logging Function
def logging(content):
logfile = open('server_perf_log.txt', 'a')
logfile.write(str(datetime.datetime.now())+'|' + content + '\n')
print(str(datetime.datetime.now())+'|' + content)
logfile.close()
# Checks the ram. Has a Failsafe implemented
def checkram():
global PID, NAME, mem_per, PID_Fallback, system_per
try:
mem_per = 0
system_per = 0
PID_Fallback = ''
# Attempt at detecting application memory usage
for process in psutil.process_iter():
if 'deadmatterServer-Win64-Shipping.exe' in str(process):
PID = process.pid
NAME = process.name()
mem_per = round(psutil.Process(PID).memory_percent(), 2)
system_per = round(psutil.virtual_memory().percent, 2)
PID_Fallback = ''
break
if mem_per == 0 or PID_Fallback == 'XXXX':
PID_Fallback = 'XXXX'
NAME = 'Connection Error'
system_per = round(psutil.virtual_memory().percent, 2)
except Exception as ex:
print(str(ex))
mem_per = 0
PID_Fallback = 'XXXX'
NAME = 'Connection Error'
system_per = round(psutil.virtual_memory().percent, 2)
# Checks Current Ram usage to Preset Cap
def check_restart():
global mem_per, system_per, PID_Fallback
try:
        # Normal restart
if mem_per > Max_Ram:
logging(f'Max Ram Met. Current Ram:{mem_per}% Server Restarting.')
mem_per = 0
system_per = 0
PID_Fallback = ''
os.system("TASKKILL /F /IM deadmatterServer-Win64-Shipping.exe")
elif system_per > Max_System_Ram:
logging(
f'Max System Ram Met. Current System Ram:{system_per}% Server Restarting.')
mem_per = 0
system_per = 0
PID_Fallback = ''
os.system("TASKKILL /F /IM deadmatterServer-Win64-Shipping.exe")
except:
pass
# Checks if Server is open
def process_exists(process_name):
call = 'TASKLIST', '/FI', 'imagename eq %s' % process_name
# use buildin check_output right away
output = subprocess.check_output(call).decode(
encoding="utf8", errors='ignore')
# check in last line for process name
last_line = output.strip().split('\r\n')[-1]
# because Fail message could be translated
return last_line.lower().startswith(process_name.lower())
def Auto_Restart():
while 1:
try:
# Opens Server
if mem_per < Max_Ram and process_exists('deadmatterServer.exe') is False and system_per < Max_System_Ram:
logging('Server not found. Starting Server.')
subprocess.Popen([Server_Path, "-log"])
# Normal Logging
elif process_exists('deadmatterServer.exe') is True and PID_Fallback != 'XXXX':
logging(
f'Monitoring:{NAME} | PID:{PID} | Current Ram Usage:{mem_per}% | Server Ram Cutoff:{Max_Ram}% | System Ram Usage:{system_per}% | System Ram Cutoff:{Max_System_Ram}%')
# Fallback logging
elif process_exists('deadmatterServer.exe') is True and PID_Fallback == 'XXXX':
logging(
f'USING FALLBACK| Monitoring:{NAME} | PID:{PID} | Current Ram Usage:{mem_per}% | Server Ram Cutoff:{Max_Ram}% | System Ram Usage:{system_per}% | System Ram Cutoff:{Max_System_Ram}%')
checkram()
check_restart()
sleep(Server_Check_Timer)
except:
pass
def Ram_Cleaner():
while 1:
try:
os.startfile('RamCleaner.bat')
logging('Cleaned Ram')
sleep(Ram_Refresh_Timer)
except:
logging('Error Cleaning Ram')
def steaminstall(auto_update):
try:
try:
os.mkdir('steam')
except:
pass
steam = SteamCMD("steam")
steam.install()
except SteamCMDException:
pass
try:
if auto_update is False:
dirpath = input(
'Enter Path to Server Directory (Leave blank for config.json):')
if dirpath == '':
dirpath = Server_Folder
elif auto_update is True:
dirpath = Server_Folder
# Checks for login credentials
if SteamUser != "" and SteamPass != "":
steam.login(SteamUser, SteamPass)
else:
steam.login()
try:
os.mkdir(dirpath + '/BACKUP_FILES')
except:
pass
print('BACKING UP FILES')
for filename in os.listdir(dirpath + 'deadmatter/Saved/Config/WindowsServer'):
original = dirpath + 'deadmatter/Saved/Config/WindowsServer/' + filename
copy = dirpath + '/BACKUP_FILES/' + filename
shutil.copyfile(original, copy)
print('Backed up config files into BACKUP_FILES folder.')
steam.app_update(1110990, dirpath, validate=True)
print('Installed Dead Matter Dedicated Server.')
except Exception as ex:
print(f'Error: {str(ex)}')
menu()
def existingsteam(steampath):
try:
steam = SteamCMD(steampath)
if SteamUser != "" and SteamPass != "":
steam.login(SteamUser, SteamPass)
else:
steam.login()
dirpath = input(
'Enter Path to Server Directory (Leave blank for config.json):')
if dirpath == '':
dirpath = Server_Folder
try:
os.mkdir(dirpath + '/BACKUP_FILES')
except:
pass
for filename in os.listdir(dirpath + 'deadmatter/Saved/Config/WindowsServer'):
original = dirpath + 'deadmatter/Saved/Config/WindowsServer/' + filename
print(original)
copy = dirpath + '/BACKUP_FILES/' + filename
print(copy)
shutil.copyfile(original, copy)
steam.app_update(1110990, dirpath, validate=True)
print('Installed Dead Matter Dedicated Server.')
except:
print('Error Logging in.')
menu()
# Automatic backup worker (distinct from the Auto_Backup config flag read above)
def Auto_Backup_Worker():
try:
while 1:
now = datetime.datetime.now()
current_time = str(now.year) + '_' + str(now.month) + '_' + \
str(now.day) + '_' + str(now.hour) + '_' + str(now.minute)
dirpath = Server_Folder
try:
os.mkdir(dirpath + '/Save_Backups')
except:
pass
for filename in os.listdir(dirpath + 'deadmatter/Saved/sqlite3'):
original = dirpath + 'deadmatter/Saved/sqlite3/' + filename
copy = dirpath + '/Save_Backups/' + \
filename + f'_{current_time}_BACKUP'
shutil.copyfile(original, copy)
logging('Saved ServerDB Backup')
sleep(Auto_Backup_Timer)
except:
pass
# Menu Function
def menu():
clear()
if Skip_Menu is True:
threading.Thread(target=Auto_Restart).start()
checkram()
threading.Thread(target=Ram_Cleaner).start()
if Auto_Backup:
            threading.Thread(target=Auto_Backup_Worker).start()
return
choice = input(
'DeadSplatter Menu\n1)Run Monitor\n2)Update / Install Server\nPlease Choose:')
if choice == '1':
threading.Thread(target=Auto_Restart).start()
checkram()
if RamRefresh:
threading.Thread(target=Ram_Cleaner).start()
if Auto_Backup:
            threading.Thread(target=Auto_Backup_Worker).start()
print('Monitoring Started.')
elif choice == '2':
steaminput = input(
'SteamCMD Menu\n1)local Steamcmd Install(Will Install new if no steamcmd is installed)\n2)Existing SteamCMD Install (Must be set in config.json)\nPlease choose:')
if steaminput == '1':
steaminstall(False)
elif steaminput == '2':
steamdir = SteamCMDInstall
existingsteam(steamdir)
if __name__ == "__main__":
try:
if AutoUpdate is True:
auto_prompt = input(
                'Auto Update enabled in config.json\nWould you like to try to update? y/n\nchoice:')
            if auto_prompt.lower() == 'y':
steaminstall(True)
menu()
except Exception as ex:
print(f'Failure during startup. Please try again EX:{str(ex)}')
|
collisionsViewerClient.py
|
from multiprocessing import Process, Lock
from multiprocessing.sharedctypes import Value, Array
from ctypes import c_double
import matplotlib.pyplot as plt
import pinocchio as pin
import numpy as np
import time
# Helper functions
def visualizeCollisionDist(gv, p1, p2, name, color, init=False):
### --- display witness as normal patch tangent to capsule
if(init):
for i in range(2):
gv.addCylinder('world/pinocchio/collisions/simple_patch_' + name + '_%d'%i, .01, .003, color)
gv.addLine('world/pinocchio/collisions/line_' + name, p1.tolist(), p2.tolist(), color)
direc = (p2-p1)/np.linalg.norm(p2-p1)
M1 = pin.SE3(pin.Quaternion.FromTwoVectors(np.matrix([0,0,1]).T,p1-p2).matrix(),p1)
M2 = pin.SE3(pin.Quaternion.FromTwoVectors(np.matrix([0,0,1]).T,p2-p1).matrix(),p2)
gv.applyConfiguration('world/pinocchio/collisions/simple_patch_' + name + '_0',pin.SE3ToXYZQUATtuple(M1))
gv.applyConfiguration('world/pinocchio/collisions/simple_patch_' + name + '_1',pin.SE3ToXYZQUATtuple(M2))
gv.setLineExtremalPoints('world/pinocchio/collisions/line_' + name, p1.tolist(), p2.tolist())
gv.setColor('world/pinocchio/collisions/simple_patch_' + name + '_0', color)
gv.setColor('world/pinocchio/collisions/simple_patch_' + name + '_1', color)
gv.setColor('world/pinocchio/collisions/line_' + name, color)
gv.refresh()
def visualizePair(gv, rmodel, rdata, q, caps_frames, local_wpoints, color, world_frame=False, init=False):
pin.forwardKinematics(rmodel, rdata, q)
pin.updateFramePlacements(rmodel, rdata)
p0 = np.array(local_wpoints[0])
p1 = np.array(local_wpoints[1])
p0.resize(3,1)
p1.resize(3,1)
leg_seg0 = rmodel.getFrameId(caps_frames[0])
leg_seg1 = rmodel.getFrameId(caps_frames[1])
if(not world_frame):
p0 = rdata.oMf[leg_seg0].rotation@p0 + rdata.oMf[leg_seg0].translation
p1 = rdata.oMf[leg_seg1].rotation@p1 + rdata.oMf[leg_seg1].translation
else:
p0 = local_wpoints[0]
p1 = local_wpoints[1]
p0.resize(3)
p1.resize(3)
visualizeCollisionDist(gv, np.array(p0), np.array(p1), caps_frames[0] + '_' + caps_frames[1], color, init=init)
def visualizeCollisions(gv, rmodel, rdata, q, caps_frames_list, legs_dist_list, wpoints_list, viz_thresh, legs_activation_thresh, init=False):
for i in range(len(caps_frames_list)):
color = [0,0,0,0]
        if legs_dist_list[i] < viz_thresh:
color = [0,1,0,1] if legs_dist_list[i] > legs_activation_thresh else [1,0,0,1]
visualizePair(gv, rmodel, rdata, q, caps_frames_list[i], wpoints_list[i], color, world_frame=False, init=init)
def visualizeTorques(gv, rmodel, rdata, tau_q, init=False):
solo12 = (len(tau_q) == 12)
for k in range(len(tau_q)):
jointFrame = rdata.oMi[k+2]
#jointFrame = rdata.oMi[k]
name = 'world/pinocchio/collisions/torque_' + str(k)
color = [0,0,1,1]
additional_transl = np.array([0,0,0.0])
if solo12:
if(k%3==0):
direc = [1,0,0] if tau_q[k]>0 else [-1,0,0]
additional_transl = np.array([0,0,0.05])
else:
direc = [0,1,0] if tau_q[k]>0 else [0,-1,0]
else:
direc = [0,1,0] if tau_q[k]>0 else [0,-1,0]
additional_transl.resize(3,1)
orientation = pin.SE3(pin.Quaternion.FromTwoVectors(np.matrix([1,0,0]).T,np.matrix(direc).T).matrix(),jointFrame.translation + additional_transl)
if(init):
gv.addArrow(name, 0.003, 1, color)
gv.resizeArrow(name, 0.003, np.abs(tau_q[k]))
gv.applyConfiguration(name, pin.SE3ToXYZQUATtuple(orientation))
def visualizeShoulderDist(q_shoulder, dist, shd_thresh):
plt.axis([-np.pi, np.pi, -np.pi, np.pi])
if dist < shd_thresh:
color = 'r'
else:
color = 'limegreen'
out = plt.scatter(q_shoulder[0][0], q_shoulder[1][0], c=color, alpha=1)
return out
def visualizeShoulderTorque(q_shoulder, dist, shd_thresh, shd_torque, scale=1.):
x_vals = [q_shoulder[0][0], q_shoulder[0][0] + scale*shd_torque[0]]
y_vals = [q_shoulder[1][0], q_shoulder[1][0] + scale*shd_torque[1]]
color = 'b'
if(len(shd_torque) == 3):
color = (0,0,0.5 + shd_torque[2])
if dist < shd_thresh:
t = plt.plot(x_vals, y_vals, c=color, linestyle='-')
else:
t = plt.plot(x_vals, y_vals, c=color, linestyle='-', alpha=0)
return t
def visualizeShoulderBackground(q, shd_dist_landscape, activation_thresh, dim=2):
shd_dist_landscape = 1*(shd_dist_landscape > 0) + 1*(shd_dist_landscape > activation_thresh)
shoulders_names = ['FL', 'FR', 'HL', 'HR']
shoulders_syms = [[1,1],[-1,1], [1,-1], [-1,-1]]
for k in range(4):
if dim==2:
local_dist_landscape = shd_dist_landscape.copy()
elif dim==3:
#ind = 0
ind = int(len(shd_dist_landscape)*((q[7 + k*3 + 2]%(2*np.pi))/(2*np.pi)))
local_dist_landscape = shd_dist_landscape[ind].copy()
if(shoulders_syms[k][0] == -1):
local_dist_landscape = np.flip(local_dist_landscape, axis = 1)
if(shoulders_syms[k][1] == -1):
local_dist_landscape = np.flip(local_dist_landscape, axis = 0)
plt.subplot(2,2,k+1)
#plt.imshow(shd_dist_landscape, extent=[-np.pi, np.pi, -np.pi, np.pi], cmap=plt.cm.gray)
plt.imshow(local_dist_landscape, extent=[-np.pi, np.pi, -np.pi, np.pi], cmap=plt.cm.afmhot)
def visualizeShouldersCollisions(qplots, line_plots, q, shd_dist, tau_shd, activation_thresh, dim=2, dt=0):
shoulders_names = ['FL', 'FR', 'HL', 'HR']
shoulders_syms = [[1,1],[-1,1], [1,-1], [-1,-1]]
for k in range(4):
plt.subplot(2,2,k+1)
#plt.imshow(shd_dist_landscape, extent=[-np.pi, np.pi, -np.pi, np.pi])
plt.title(shoulders_names[k] + '\nd = {:.3f}'.format(shd_dist[k]))
shd_torque = tau_shd[3*k:3*k+dim]
qplots[k].append(visualizeShoulderDist(q[7+3*k:7+3*k+2].tolist(), shd_dist[k], activation_thresh))
torque_line, = visualizeShoulderTorque(q[7+3*k:7+3*k+dim].tolist(), shd_dist[k], activation_thresh, shd_torque)
line_plots[k].append(torque_line)
if (len(qplots[k]) > 4):
qplots[k].pop(0).remove()
if (len(line_plots[k]) > 4):
line_plots[k].pop(0).remove()
plt.pause(dt)
class NonBlockingViewerFromRobot():
def __init__(self,robot,dt=0.01,nb_pairs=0, viz_thresh=0, act_thresh_legs=0, act_thresh_shd=0, shoulder_nn_dim=0):
# a shared c_double array
self.dt = dt
self.nb_pairs = nb_pairs
self.shoulder_nn_dim = shoulder_nn_dim
self.shared_q_viewer = Array(c_double, robot.nq, lock=False)
self.shared_tau = Array(c_double, robot.nq - 7, lock=False)
self.shared_tau_shd = Array(c_double, robot.nq - 7, lock=False)
self.shared_legs_dist = Array(c_double, nb_pairs, lock=False)
self.shared_shd_dist = Array(c_double, 4, lock=False)
self.shared_wpoints = []
for k in range(self.nb_pairs):
self.shared_wpoints.append([Array(c_double, 3), Array(c_double, 3)])
self.p = Process(target=self.display_process, args=(robot, self.shared_q_viewer, self.shared_wpoints, self.shared_legs_dist, self.shared_shd_dist, self.shared_tau, self.shared_tau_shd, viz_thresh, act_thresh_legs, act_thresh_shd, self.shoulder_nn_dim))
self.p.start()
def display_process(self,robot, shared_q_viewer, shared_wpoints, shared_legs_dist, shared_shd_dist, shared_tau, shared_tau_shd, viz_thresh, legs_activation_thresh, shd_activation_thresh, shoulder_nn_dim=0):
robot.displayVisuals(True)
robot.displayCollisions(True)
        # This will run on a different process
q_viewer = robot.q0.copy()
tau_q = np.zeros(robot.nq - 7)
tau_q_shd = np.zeros(robot.nq - 7)
legs_dist = np.zeros(self.nb_pairs)
shd_dist = np.zeros(4)
wpoints = [[[0,0,0],[0,0,0]]]*self.nb_pairs
gv = robot.viewer.gui
rmodel = robot.model
rdata = rmodel.createData()
if(len(tau_q)==8):
caps_frames_list = [["FL_UPPER_LEG", "HL_LOWER_LEG"],\
["FL_LOWER_LEG", "HL_UPPER_LEG"],
["FL_LOWER_LEG", "HL_LOWER_LEG"],
["FR_UPPER_LEG", "HR_LOWER_LEG"],
["FR_LOWER_LEG", "HR_UPPER_LEG"],
["FR_LOWER_LEG", "HR_LOWER_LEG"]]
else:
caps_frames_list = [["FL_UPPER_LEG", "FR_UPPER_LEG"],\
["FL_UPPER_LEG", "FR_LOWER_LEG"],
["FL_LOWER_LEG", "FR_UPPER_LEG"],
["FL_LOWER_LEG", "FR_LOWER_LEG"],
["FL_UPPER_LEG", "HL_LOWER_LEG"],
["FL_LOWER_LEG", "HL_UPPER_LEG"],
["FL_LOWER_LEG", "HL_LOWER_LEG"],
["FL_UPPER_LEG", "HR_LOWER_LEG"],
["FL_LOWER_LEG", "HR_UPPER_LEG"],
["FL_LOWER_LEG", "HR_LOWER_LEG"],
["FR_UPPER_LEG", "HL_LOWER_LEG"],
["FR_LOWER_LEG", "HL_UPPER_LEG"],
["FR_LOWER_LEG", "HL_LOWER_LEG"],
["FR_UPPER_LEG", "HR_LOWER_LEG"],
["FR_LOWER_LEG", "HR_UPPER_LEG"],
["FR_LOWER_LEG", "HR_LOWER_LEG"],
["HL_UPPER_LEG", "HR_UPPER_LEG"],
["HL_UPPER_LEG", "HR_LOWER_LEG"],
["HL_LOWER_LEG", "HR_UPPER_LEG"],
["HL_LOWER_LEG", "HR_LOWER_LEG"]]
count = 0
displayShoulder = (shoulder_nn_dim > 0)
if(displayShoulder):
            # one independent list per shoulder ([[]]*4 would alias a single shared list)
            plots = [[] for _ in range(4)]
            line_plots = [[] for _ in range(4)]
plt.figure()
plt.suptitle("Shoulders distances")
#shd_dist_landscape = np.load('/home/ada/git/tnoel/solopython/coll_avoidance_modules/ref_net_dist_landscape.npy', allow_pickle=True)
shd_dist_landscape = np.load('/home/ada/git/tnoel/solopython/coll_avoidance_modules/ref_net3d_dist_landscape.npy', allow_pickle=True)
visualizeShoulderBackground(q_viewer, shd_dist_landscape, shd_activation_thresh, dim=3)
'''
plt.figure()
shd_dist_landscape = np.load('/home/tnoel/stage/solo-collisions/src/python/ref_net_dist_landscape.npy', allow_pickle=True)
plt.suptitle("Shoulders distances")
shd_dist_landscape = 1*(shd_dist_landscape > 0) + 1*(shd_dist_landscape > shd_activation_thresh)
shoulders_names = ['FL', 'FR', 'HL', 'HR']
shoulders_syms = [[1,1],[-1,1], [1,-1], [-1,-1]]
for k in range(4):
local_dist_landscape = shd_dist_landscape.copy()
if(shoulders_syms[k][0] == -1):
local_dist_landscape = np.flip(local_dist_landscape, axis = 1)
if(shoulders_syms[k][1] == -1):
local_dist_landscape = np.flip(local_dist_landscape, axis = 0)
plt.subplot(2,2,k+1)
#plt.imshow(shd_dist_landscape, extent=[-np.pi, np.pi, -np.pi, np.pi], cmap=plt.cm.gray)
plt.imshow(local_dist_landscape, extent=[-np.pi, np.pi, -np.pi, np.pi], cmap=plt.cm.afmhot)
#plt.show()
'''
while(1):
for n in gv.getNodeList():
if 'LEG_0' in n and 'collision' in n and len(n)>27:
gv.setColor(n, [1,0.5,0,0.1])
for i in range(robot.nq):
q_viewer[i] = shared_q_viewer[i]
for i in range(robot.nq - 7):
tau_q[i] = shared_tau[i]
tau_q_shd[i] = shared_tau_shd[i]
for i in range(self.nb_pairs):
wpoints[i] = shared_wpoints[i]
legs_dist[i] = shared_legs_dist[i]
for i in range(4):
shd_dist[i] = shared_shd_dist[i]
robot.display(q_viewer)
if(displayShoulder):
visualizeShoulderBackground(q_viewer, shd_dist_landscape, shd_activation_thresh, dim=shoulder_nn_dim)
visualizeShouldersCollisions(plots, line_plots, q_viewer, shd_dist, tau_q_shd, shd_activation_thresh, dim=shoulder_nn_dim, dt=self.dt)
visualizeCollisions(gv, rmodel, rdata, q_viewer, caps_frames_list, legs_dist, wpoints, viz_thresh, legs_activation_thresh, init=(count==0))
visualizeTorques(gv, rmodel, rdata, tau_q, init=(count==0))
gv.refresh()
time.sleep(self.dt)
count += 1
plt.show()
def display(self,q):
for i in range(len(self.shared_q_viewer)):
self.shared_q_viewer[i] = q[i]
def display_tau(self, tau):
for i in range(len(self.shared_tau)):
self.shared_tau[i] = tau[i]
def display_tau_shd(self, tau_shd):
for i in range(len(self.shared_tau_shd)):
self.shared_tau_shd[i] = tau_shd[i]
def display_legs_dist(self, legs_dist):
for i in range(len(self.shared_legs_dist)):
self.shared_legs_dist[i] = legs_dist[i]
def display_shd_dist(self, shd_dist):
for i in range(len(self.shared_shd_dist)):
self.shared_shd_dist[i] = shd_dist[i]
def updateWitnessPoints(self, wpoints):
for i in range(len(wpoints)):
for j in range(3):
self.shared_wpoints[i][0][j] = wpoints[i][0][j]
self.shared_wpoints[i][1][j] = wpoints[i][1][j]
def stop(self):
self.p.terminate()
self.p.join()
class viewerClient():
#def __init__(self,urdf="/opt/openrobots/lib/python3.5/site-packages/../../../share/example-robot-data/robots/solo_description/robots/solo.urdf",modelPath="/opt/openrobots/lib/python3.5/site-packages/../../../share/example-robot-data/robots",dt=0.01):
def __init__(self, nb_legs_pairs, nn_dim, legs_pairs_dist_threshold, shoulders_dist_threshold, urdf = "/opt/openrobots/share/example-robot-data/robots/solo_description/robots/solo.urdf", modelPath="/opt/openrobots/share/example-robot-data/robots/solo_description/robots/"):
pin.switchToNumpyMatrix()
robot = pin.RobotWrapper.BuildFromURDF( urdf, modelPath, pin.JointModelFreeFlyer())
robot.initViewer(loadModel=True)
if ('viewer' in robot.viz.__dict__):
robot.viewer.gui.setRefreshIsSynchronous(False)
dt = 0.01
self.nbv = NonBlockingViewerFromRobot(robot,dt, nb_pairs=nb_legs_pairs, viz_thresh=3*legs_pairs_dist_threshold, act_thresh_legs=legs_pairs_dist_threshold, act_thresh_shd=shoulders_dist_threshold, shoulder_nn_dim=nn_dim)
def display(self,q, legs_dist, shd_dist, wpoints, tau, tau_shd):
self.nbv.display(q)
self.nbv.display_tau(tau)
self.nbv.display_tau_shd(tau_shd)
self.nbv.display_legs_dist(legs_dist)
self.nbv.display_shd_dist(shd_dist)
self.nbv.updateWitnessPoints(wpoints)
def stop(self):
self.nbv.stop()
"""v=viewerClient()
from IPython import embed
embed()"""
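# A hedged construction sketch (argument values are illustrative assumptions;
# the keyword names follow viewerClient.__init__ above):
#
#     v = viewerClient(nb_legs_pairs=20, nn_dim=3,
#                      legs_pairs_dist_threshold=0.02,
#                      shoulders_dist_threshold=0.1)
#     while running:                       # control loop
#         v.display(q, legs_dist, shd_dist, wpoints, tau, tau_shd)
#     v.stop()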
|
fifo_queue_test.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class FIFOQueueTest(tf.test.TestCase):
def testConstructor(self):
with tf.Graph().as_default():
q = tf.FIFOQueue(10, tf.float32, name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with tf.Graph().as_default():
q = tf.FIFOQueue(5, (tf.int32, tf.float32), shared_name="foo", name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with tf.Graph().as_default():
q = tf.FIFOQueue(5, (tf.int32, tf.float32),
shapes=(tf.TensorShape([1, 1, 2, 3]),
tf.TensorShape([5, 8])), name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32, shapes=(3, 2))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.test_session():
q = tf.FIFOQueue(10, [tf.int32, tf.int32],
shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
def testEnqueueDictWithoutNames(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue({"a": 12.0})
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue_many({"a": [12.0, 13.0]})
def testParallelEnqueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue, args=(e,))
for e in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = dequeued_t.eval()
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(3, tf.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, (tf.int32, tf.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = sess.run(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, size.eval())
dequeued_t.op.run()
self.assertEqual(0, size.eval())
def testEnqueueMany(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = dequeued_t.eval()
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
empty_t = tf.constant([], dtype=tf.float32,
shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], size_t.eval())
enqueue_op.run()
self.assertEqual([0], size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError("specified shapes"):
q.dequeue_many(0).eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, (tf.float32, tf.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
def testMultiDequeueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, (tf.float32, tf.int32),
shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testHighDimension(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.int32, (4, 4, 4, 4))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((), (2)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = tf.FIFOQueue(10, (tf.int32, tf.int32, tf.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], tf.placeholder(tf.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((tf.placeholder(tf.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = tf.FIFOQueue(10, (tf.int32, tf.float32), ((), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(tf.int32, enq.inputs[1].dtype)
self.assertEqual(tf.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = tf.FIFOQueue(10, (tf.int32, tf.float32), ((), ()))
with self.assertRaises(ValueError):
q.enqueue((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
def testEnqueueWrongShapeAtRuntime(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (3, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = tf.placeholder(tf.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError, r"Expected \[3,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongShape(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (3, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = tf.placeholder(tf.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,3,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
dequeued_t.eval()
def testParallelEnqueueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(1000, tf.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(1000, tf.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(50, tf.float32, shapes=())
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
sess.run(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.int32, shapes=())
enqueue_placeholder = tf.placeholder(tf.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = tf.placeholder(
tf.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, sess.run(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
# With equal probability, run Enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(elements_enqueued,
elements_enqueued + count,
dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.int32, shapes=())
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = tf.placeholder(tf.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
sess.run(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, dequeued_t.eval())
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(elements_dequeued,
elements_dequeued + count,
dtype=np.int32)
self.assertAllEqual(
expected_range, dequeuemany_t.eval({count_placeholder: count}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = tf.FIFOQueue(100, tf.int32, ())
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = tf.FIFOQueue(total_count, tf.int32, ())
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32)
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, tf.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
sess.run(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(dequeued_t)
self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
def close():
sess.run(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, (tf.float32, tf.float32), ((), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32, ())
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
self.assertEqual([50.0], dequeued_t.eval())
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
time.sleep(0.01)
self.assertEqual([50.0], dequeued_t.eval())
self.assertEqual([60.0], dequeued_t.eval())
thread.join()
def testBlockingEnqueueBeforeClose(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, dequeued_t.eval())
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, tf.float32)
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, dequeued_t.eval())
def testDoesNotLoseValue(self):
with self.test_session():
q = tf.FIFOQueue(1, tf.float32)
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.test_session():
q1 = tf.FIFOQueue(
1, tf.float32, shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = tf.FIFOQueue(
1, tf.float32, shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
q_a_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_a")
q_a_2 = tf.FIFOQueue(15, tf.float32, shared_name="q_a")
q_a_1.queue_ref.eval()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.eval()
q_b_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_b")
q_b_2 = tf.FIFOQueue(10, tf.int32, shared_name="q_b")
q_b_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.eval()
q_c_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_c")
q_c_2 = tf.FIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.eval()
q_d_1 = tf.FIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = tf.FIFOQueue(10, tf.float32, shared_name="q_d")
q_d_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.eval()
q_e_1 = tf.FIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = tf.FIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.eval()
q_f_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_f")
q_f_2 = tf.FIFOQueue(
10, (tf.float32, tf.int32), shared_name="q_f")
q_f_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.eval()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(tf.FIFOQueue(10, tf.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = tf.FIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = tf.FIFOQueue(10, tf.float32)
q2 = tf.FIFOQueue(15, tf.float32)
enq_q = tf.FIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("Index must be in the range"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("Dequeue operation was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("Dequeue operation was cancelled"):
sess.run(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("Enqueue operation was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("Enqueue operation was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
q_empty = tf.FIFOQueue(5, tf.float32, ())
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = tf.FIFOQueue(5, tf.float32)
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(self._blockingDequeueMany, args=(sess,
dequeue_many_op)),
self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(self._blockingEnqueueMany, args=(sess,
enqueue_many_op))]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testBigEnqueueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(5, tf.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(2, tf.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
sess.run(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.test_session() as sess:
dtypes = [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8,
tf.int64, tf.bool, tf.complex64]
shape = (32, 4, 128)
q = tf.FIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == tf.bool:
np_array = np_array > 0
elif dtype == tf.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = sess.run(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testDeviceColocation(self):
with tf.device("/job:ps"):
q = tf.FIFOQueue(32, [tf.int32], name="q")
with tf.device("/job:worker/task:7"):
dequeued_t = q.dequeue()
self.assertDeviceEqual("/job:ps", dequeued_t.device)
self.assertEqual([b"loc:@q"], dequeued_t.op.colocation_groups())
class FIFOQueueDictTest(tf.test.TestCase):
def testConstructor(self):
with tf.Graph().as_default():
q = tf.FIFOQueue(5, (tf.int32, tf.float32), names=("i", "j"),
shared_name="foo", name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
self.assertEqual(["i", "j"], q.names)
def testConstructorWithShapes(self):
with tf.Graph().as_default():
q = tf.FIFOQueue(5, (tf.int32, tf.float32), names=("i", "f"),
shapes=(tf.TensorShape([1, 1, 2, 3]),
tf.TensorShape([5, 8])), name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
self.assertEqual(["i", "f"], q.names)
def testEnqueueDequeueOneComponent(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32, shapes=((),), names="f")
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue(10.0)
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0,))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 12})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"f": 10.0, "s": "aa"})
enqueue_op = q.enqueue({"f": 10.0})
enqueue_op2 = q.enqueue({"f": 20.0})
enqueue_op3 = q.enqueue({"f": 30.0})
# Verify that enqueue_many() checks that when using names we must enqueue
# a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many([40.0, 50.0])
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": 12})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "s": ["aa", "bb"]})
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0]})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
sess.run(enqueue_op)
sess.run(enqueue_op2)
sess.run(enqueue_op3)
sess.run(enqueue_op4)
f = sess.run(dequeue["f"])
self.assertEqual(10.0, f)
f = sess.run(dequeue_2["f"])
self.assertEqual([20.0, 30.0], list(f))
f = sess.run(dequeue_2["f"])
self.assertEqual([40.0, 50.0], list(f))
def testEnqueueDequeueMultipleComponent(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, (tf.float32, tf.int32, tf.string),
shapes=((), (), ()), names=("f", "i", "s"))
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0, 123, "aa"))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 10.0})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 12, "s": "aa"})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0, "x": 10.0})
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0})
enqueue_op2 = q.enqueue({"i": 124, "s": "bb", "f": 20.0})
enqueue_op3 = q.enqueue({"i": 125, "s": "cc", "f": 30.0})
# Verify that enqueue_many() checks that when using names we must enqueue
# a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many(([40.0, 50.0], [126, 127], ["dd", "ee"]))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": [10.0, 20.0]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"i": [12, 12], "s": ["aa", "bb"]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "i": [126, 127],
"s": ["dd", "ee"], "x": [1, 2]})
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "i": [126, 127],
"s": ["dd", "ee"]})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
sess.run(enqueue_op)
sess.run(enqueue_op2)
sess.run(enqueue_op3)
sess.run(enqueue_op4)
i, f, s = sess.run([dequeue["i"], dequeue["f"], dequeue["s"]])
self.assertEqual(123, i)
self.assertEqual(10.0, f)
self.assertEqual(tf.compat.as_bytes("aa"), s)
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([124, 125], list(i))
self.assertEqual([20.0, 30.0], list(f))
self.assertEqual([tf.compat.as_bytes("bb"), tf.compat.as_bytes("cc")],
                 list(s))
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([126, 127], list(i))
self.assertEqual([40.0, 50.0], list(f))
self.assertEqual([tf.compat.as_bytes("dd"), tf.compat.as_bytes("ee")],
                 list(s))
class FIFOQueueWithTimeoutTest(tf.test.TestCase):
def testDequeueWithTimeout(self):
with self.test_session(
config=tf.ConfigProto(operation_timeout_in_ms=20)) as sess:
q = tf.FIFOQueue(10, tf.float32)
dequeued_t = q.dequeue()
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaisesRegexp(tf.errors.DeadlineExceededError,
"Timed out waiting for notification"):
sess.run(dequeued_t)
if __name__ == "__main__":
tf.test.main()
|
test_queues.py
|
import threading
import time
from dagster import ModeDefinition, default_executors
from dagster.core.test_utils import instance_for_test
from dagster_celery import celery_executor
from .utils import execute_on_thread, start_celery_worker
celery_mode_defs = [ModeDefinition(executor_defs=default_executors + [celery_executor])]
def test_multiqueue(rabbitmq): # pylint: disable=unused-argument
with instance_for_test() as instance:
done = threading.Event()
with start_celery_worker():
execute_thread = threading.Thread(
target=execute_on_thread, args=("multiqueue_pipeline", done, instance.get_ref())
)
execute_thread.daemon = True
execute_thread.start()
time.sleep(1)
assert not done.is_set()
with start_celery_worker(queue="fooqueue"):
execute_thread.join()
assert done.is_set()
|
multiprocessing_namespaces_mutable.py
|
# Copyright (c) 2009 Doug Hellmann All rights reserved.
#
"""
"""
# end_pymotw_header
import multiprocessing
def producer(ns, event):
# DOES NOT UPDATE GLOBAL VALUE!
ns.my_list.append("This is the value")
event.set()
def consumer(ns, event):
print("Before event:", ns.my_list)
event.wait()
print("After event :", ns.my_list)
if __name__ == "__main__":
mgr = multiprocessing.Manager()
namespace = mgr.Namespace()
namespace.my_list = []
event = multiprocessing.Event()
p = multiprocessing.Process(target=producer, args=(namespace, event))
c = multiprocessing.Process(target=consumer, args=(namespace, event))
c.start()
p.start()
c.join()
p.join()
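# Hedged sketch, not part of the original example: the append above is lost
# because the Namespace only proxies attribute *assignment*, not in-place
# mutation of the list it holds.  A manager-backed list proxy does forward
# mutations, so passing mgr.list() instead of the Namespace makes the value
# visible to the other process.  The names producer_list/consumer_list below
# are illustrative only.
def producer_list(shared_list, event):
    shared_list.append("This is the value")  # proxied: visible to consumers
    event.set()
def consumer_list(shared_list, event):
    print("Before event:", list(shared_list))
    event.wait()
    print("After event :", list(shared_list))
# Usage sketch: shared = mgr.list(); pass it to the Process targets exactly as
# the Namespace is passed above.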
|
unsplashed_wallpaper.py
|
#!/usr/bin/env python3
import os
import sys
import time
import threading
import signal
import requests
import configparser
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('AppIndicator3', '0.1')
from gi.repository import Gtk, AppIndicator3, GObject
from requests.exceptions import ConnectionError, Timeout
curr_path = os.path.dirname(os.path.realpath(__file__))
APPINDICATOR_ID = 'myappindicator'
CONFIG_FILE = os.path.expanduser('~/.config/unsplashed_wallpaper/config.conf')
# Options Dialog global vars
USE_LOCATION = False
SEARCH_TERMS = 'San Francisco'
REFRESH_INT_LIST = [1800, 3600, 7200] # 30, 60, or 120 minutes
REFRESH_INTERVAL = 1 # index into REFRESH_INT_LIST (1 -> 3600 seconds, i.e. 1 hour, by default)
class UnsplashedWallpaper(object):
def __init__(self):
self.cwd = os.getcwd()
self.file_name = "unsplash_wallpaper.png"
self.change_now = False
def get_location(self, ip=None):
if ip:
r = requests.get("http://ipinfo.io/%s/json" % (ip))
else:
r = requests.get("http://ipinfo.io/json")
j = r.json()
if 'bogon' in j and j['bogon']:
return None
else:
return ",".join([j['city'], j['region']])
def get_wallpaper(self, location, width, height, write_to_file=False):
try:
request_url = "https://source.unsplash.com/%sx%s/?%s" % (width, height, location)
# Return the image URL instead of writing the file to local disk.
if not write_to_file:
return {'url': request_url}
r = requests.get(request_url)
with open(self.file_name, 'wb') as f:
f.write(r.content)
return None
except Exception:
print("Failed to get wallpaper")
def set_wallpaper(self):
try:
self.set_cmd = "gsettings set org.gnome.desktop.background picture-uri file://" + self.cwd + "/" + self.file_name
os.system(self.set_cmd)
except Exception:
print("Failed to set wallpaper")
def remove_wallpaper(self):
self.file_path = self.cwd+"/"+self.file_name
if os.path.exists(self.file_path):
os.remove(self.file_path)
def check_network(self):
try:
requests.get('http://www.google.com',timeout=1)
return True
except (ConnectionError, Timeout):
pass
return False
def should_change_now(self):
return self.change_now
def reset_change_now(self):
self.change_now = False
def set_change_now(self):
self.change_now = True
class MenuHandler:
def menu_change_wallpaper(self, *args):
uw.set_change_now()
def menu_about(self, *args):
about_dialog = builder.get_object("ABOUT_DIALOG")
about_dialog.run()
about_dialog.hide()
def menu_options(self, *args):
options_dialog = builder.get_object("OPTIONS_DIALOG")
options_dialog.run()
options_dialog.hide()
def options_cancel_btn_clicked(self, *args):
options_dialog = builder.get_object("OPTIONS_DIALOG")
options_dialog.hide()
def options_save_btn_clicked(self, *args):
global SEARCH_TERMS
global USE_LOCATION
global REFRESH_INTERVAL
SEARCH_TERMS = builder.get_object("OPTIONS_SEARCH_TERMS").get_text()
USE_LOCATION = builder.get_object("OPTIONS_LOCATION_SWITCH").get_active()
REFRESH_INTERVAL = builder.get_object("OPTIONS_WALLPAPER_INTERVAL_COMBOBOX").get_active()
save_config()
options_dialog = builder.get_object("OPTIONS_DIALOG")
options_dialog.hide()
def menu_quit(self, *args):
Gtk.main_quit()
def load_config():
global SEARCH_TERMS
global USE_LOCATION
global REFRESH_INTERVAL
config = configparser.RawConfigParser(
{'use_location': 'False',
'search_terms': 'San Francisco',
'refresh_interval': '1',
})
config.add_section('general')
config.read(CONFIG_FILE)
USE_LOCATION = config.getboolean('general', 'use_location')
SEARCH_TERMS = config.get('general', 'search_terms')
REFRESH_INTERVAL = config.getint('general', 'refresh_interval')
def save_config():
global SEARCH_TERMS
global USE_LOCATION
global REFRESH_INTERVAL
config = configparser.RawConfigParser()
config.add_section('general')
config.set('general', 'use_location', USE_LOCATION)
config.set('general', 'search_terms', SEARCH_TERMS)
config.set('general', 'refresh_interval', REFRESH_INTERVAL)
dirname = os.path.dirname(CONFIG_FILE)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(CONFIG_FILE, 'w+') as configfile:
config.write(configfile)
def unsplashed_thread():
import tkinter as tk
my_screen = tk.Tk()
screen_width = my_screen.winfo_screenwidth()
screen_height = my_screen.winfo_screenheight()
while True:
if uw.check_network():
print("Getting new wallpaper...")
uw.remove_wallpaper()
if USE_LOCATION:
location = uw.get_location()
else:
location = SEARCH_TERMS
uw.get_wallpaper(location, screen_width, screen_height, write_to_file=True)
uw.set_wallpaper()
for _ in range(REFRESH_INT_LIST[REFRESH_INTERVAL]):
if uw.should_change_now():
uw.reset_change_now()
break
time.sleep(1)
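# Hedged alternative sketch, not part of the original app: the loop above
# polls should_change_now() once per second so that a "change now" request can
# interrupt the refresh interval.  The same interruptible wait can be written
# with a threading.Event, where set() wakes the waiter immediately and
# wait(timeout) covers the interval.  The name wait_for_refresh is
# illustrative only.
def wait_for_refresh(change_event, interval_seconds):
    # Returns True if woken early by change_event.set(), False on timeout.
    woke_early = change_event.wait(timeout=interval_seconds)
    if woke_early:
        change_event.clear()
    return woke_early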
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal.SIG_DFL)
load_config()
builder = Gtk.Builder()
builder.add_from_file("unsplashed_menu.glade")
builder.connect_signals(MenuHandler())
builder.get_object("OPTIONS_SEARCH_TERMS").set_text(SEARCH_TERMS)
builder.get_object("OPTIONS_LOCATION_SWITCH").set_active(USE_LOCATION)
list_store = Gtk.ListStore(GObject.TYPE_STRING)
list_store.append(("30 minutes",))
list_store.append(("1 hour",))
list_store.append(("2 hours",))
cell = Gtk.CellRendererText()
refresh_combobox = builder.get_object("OPTIONS_WALLPAPER_INTERVAL_COMBOBOX")
refresh_combobox.set_model(list_store)
refresh_combobox.pack_start(cell, True)
refresh_combobox.set_active(REFRESH_INTERVAL)
refresh_combobox.add_attribute(cell, "text", 0)
indicator = AppIndicator3.Indicator.new(APPINDICATOR_ID, Gtk.STOCK_INFO, AppIndicator3.IndicatorCategory.SYSTEM_SERVICES)
indicator.set_status(AppIndicator3.IndicatorStatus.ACTIVE)
indicator.set_menu(builder.get_object("THE_MENU"))
uw = UnsplashedWallpaper()
thread = threading.Thread(target=unsplashed_thread)
thread.daemon = True
thread.start()
Gtk.main()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from PyQt5.QtWidgets import *
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
from .installwizard import WIF_HELP_TEXT
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
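# (Illustration only, not executed: registering something like
#  `partial(self.update_status)` or `lambda *a: self.update_status()` here
#  would store a strong reference to this window in the network's callback
#  table and keep it from being garbage-collected after close.)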
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
traceback.print_exception(*exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
def on_network(self, event, *args):
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_list.update_item(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
# todo: update only unconfirmed tx
self.history_list.update()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
# update(==init) all tabs; expensive for large wallets..
# so delay it somewhat, hence __init__ can finish and the window can appear sooner
QTimer.singleShot(50, self.update_tabs)
self.need_update.set()
# Once the GUI has been initialized, check if we want to announce something, since the callback has been called before the GUI was initialized.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum NIX Testnet" if constants.net.TESTNET else "Electrum NIX"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend NIX with it."),
_("Make sure you own the seed phrase or the private keys, before you request NIX to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are reserved keywords in macOS; use this as a workaround.
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('nix:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying NIX.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the NIX system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/NixPlatform/electrum-nix/issues\">https://github.com/NixPlatform/electrum-nix/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum NIX - " + _("Reporting Bugs"))
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.print_error("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if is_relevant:
total_amount += v
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if is_relevant:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum NIX", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum NIX", message, QSystemTrayIcon.Information, 20000)
# Custom wrappers for getOpenFileName and getSaveFileName that remember the path selected by the user.
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected%s.png"%fork_str)
else:
icon = QIcon(":icons/status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('NIX address where the payment should be received. Note that each payment request uses a different NIX address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding NIX addresses.'),
_('The NIX address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a NIX address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a NIX address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('NIX transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
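            # Fee slider callback: with dynamic estimates the slider position is
            # stored (mempool depth level or ETA-based fee level); in static mode
            # an absolute fee rate in sat/kB is stored and the feerate field is
            # updated to show it per byte.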
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
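            # Read-only label placed between the feerate and fee edits,
            # rendered as "x <size> bytes =".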
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
#LPoS integrations
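        # Extra send-tab fields for leased proof-of-stake (LPoS) contracts:
        # the address the coins are leased to for staking, the address that
        # receives the fee reward, and the fee percent granted on a stake.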
self.lease_to_e = PayToEdit(self)
msg = _('Lease to.') + '\n\n'\
+ _('You may enter a NIX address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a NIX address)')
lease_to_label = HelpLabel(_('Lease to'), msg)
grid.addWidget(lease_to_label, 6, 0)
grid.addWidget(self.lease_to_e, 6, 1, 1, -1)
self.cold_lease_to_e = PayToEdit(self)
msg = _('Fee reward to.') + '\n\n'\
+ _('You may enter a NIX address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a NIX address)')
fee_label = HelpLabel(_('Fee reward to'), msg)
grid.addWidget(fee_label, 7, 0)
grid.addWidget(self.cold_lease_to_e, 7, 1, 1, -1)
msg = _('Fee percent of the lease contract (not mandatory).') + '\n\n'\
+ _('The fee percent of a successful stake goes to the fee reward address.')
description_label = HelpLabel(_('Fee percent'), msg)
grid.addWidget(description_label, 8, 0)
        # Use a dedicated edit for the fee percent so the transaction
        # description field (self.message_e, created above) is not overwritten.
        self.fee_percent_e = MyLineEdit()
        self.fee_percent_e.setFixedWidth(140)
        grid.addWidget(self.fee_percent_e, 8, 1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 9, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
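            # Re-enable the Max button only when the amount field is empty and editable.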
self.is_max = False
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
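            # Recolour the amount, fee and feerate fields: red when there are
            # not enough funds, blue for auto-filled values, default otherwise.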
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.is_max:
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
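        # A fee (or feerate) is considered "frozen" when the user edited it
        # manually; do_update_fee() then keeps the entered value instead of
        # recomputing it.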
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
lpos_output = None
# if address is empty, assume lpos contract is being made
        if self.payto_e.payto_address is None:
outputs = self.lease_to_e.get_outputs(self.is_max)
lpos_output = self.cold_lease_to_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
        if self.payto_e.payto_address is None and self.lease_to_e.payto_address is None:
            self.show_error(_('No recipients'))
            return
        if self.payto_e.payto_address is not None and (self.lease_to_e.payto_address is not None or self.cold_lease_to_e.payto_address is not None):
            self.show_error(_('Cannot make a regular transaction and LPoS contract at the same time'))
            return
        if self.cold_lease_to_e.payto_address is not None and self.lease_to_e.payto_address is None:
            self.show_error(_('No lease to address'))
            return
        if self.cold_lease_to_e.payto_address is None and self.lease_to_e.payto_address is not None:
            self.show_error(_('No fee reward address'))
            return
for o in outputs:
if o.address is None:
self.show_error(_('NIX Address is None'))
return
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid NIX Address'))
return
if o.value is None:
self.show_error(_('Invalid Amount'))
return
        for o in (lpos_output or []):  # lpos_output is None for regular (non-LPoS) sends
if o.address is None:
self.show_error(_('Fee Reward Address is None'))
return
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid NIX Address'))
return
if o.value is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins, lpos_output
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins, lpos_output = r
try:
is_sweep = bool(self.tx_external_keypairs)
if not lpos_output:
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
else:
tx = self.wallet.make_unsigned_lpos_transaction(
coins, outputs, lpos_output, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except Exception as e:
status, msg = False, repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.print_error(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid NIX URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
            else:
                vbox.addWidget(QLabel(_("Master Public Key")))
            show_mpk(0)
            vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid NIX address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid NIX address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("nix:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
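        # Electrum packs raw transactions into QR codes using base43, so decode
        # it back to hex before parsing.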
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(e))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-nix-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
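            # Background worker: export keys one by one and let the GUI update
            # its progress via computing_privkeys_signal after each key.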
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
# user pressed "sweep"
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
#traceback.print_exc(file=sys.stderr)
self.show_message(str(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
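    # Shared helper for the import dialogs: show a multi-line text dialog, split
    # the input on whitespace, hand the entries to func, and report which ones
    # were accepted or rejected.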
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
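    # Build the Preferences dialog. Widgets are collected per category
    # (fees, transactions, appearance, fiat, identity) and laid out tab by tab
    # at the end of the method.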
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 NIX = 1000 mNIX. 1 mNIX = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_list.refresh_headers()
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum NIX to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
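    # Child Pays for Parent: ask the user for a fee and build a child transaction
    # that spends an unconfirmed output of parent_tx, so miners are incentivised
    # to confirm both transactions together.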
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
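    # Replace-By-Fee: ask for a new, higher fee and build a replacement
    # transaction via wallet.bump_fee(); optionally mark the result as final.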
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
        vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), msg)
return True
|
pseudoscience_stereo2depth.py
|
# Josh Gladstone made this in 2018
# But a lot of it came from Timotheos Samartzidis
# http://timosam.com/python_opencv_depthimage
# Enjoy!
import numpy as np
import sys, os
import glob
import time
import unicodedata
from threading import *
from os.path import join
# Python 3
import cv2
import urllib.request
from queue import *
from tkinter import *
from tkinter import filedialog
from tkinter import ttk
from tkinter import messagebox
# Python 2
'''
import sys
sys.path.append("/usr/local/lib/python2.7/site-packages")
import cv2
import urllib2
from Queue import *
from Tkinter import *
import tkFileDialog as filedialog
import ttk
import tkMessageBox as messagebox
'''
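# Overview: a Tkinter settings window drives an OpenCV preview. Calculate()
# computes a WLS-filtered StereoSGBM disparity (depth) map for the currently
# loaded top/bottom (or side-by-side VR180) stereo frame, using the global
# slider values; ThreadedCalculate() is the queue-driven worker used for
# batch export.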
def Calculate():
global currentfile, img, height, width, imgL, imgR, titleStr, RightEye
if (currentfile != ''):
settings.title(titleStr + ' ( Working. . . )')
minDisparities=16
window_size = w2.get() # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely
rez = w0.get() / 20.0
if (rez > 0):
resL = cv2.resize(imgL,None,fx=rez, fy=rez, interpolation = cv2.INTER_AREA)
resR = cv2.resize(imgR,None,fx=rez, fy=rez, interpolation = cv2.INTER_AREA)
left_matcher = cv2.StereoSGBM_create(
minDisparity=0,
numDisparities=minDisparities, # max_disp has to be dividable by 16 f. E. HH 192, 256
blockSize= w1.get(),
P1=8 * 3 * window_size ** 2, # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely
P2=32 * 3 * window_size ** 2,
disp12MaxDiff=1,
uniquenessRatio=15,
speckleWindowSize=0,
speckleRange=2,
preFilterCap= w3.get(),
mode=cv2.STEREO_SGBM_MODE_HH
)
right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
lmbda = w4.get() * 1000
sigma = 1.2
visual_multiplier = 1
wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
wls_filter.setLambda(lmbda)
wls_filter.setSigmaColor(sigma)
displ = left_matcher.compute(resL, resR)
dispr = right_matcher.compute(resR, resL)
if (RightEye == False):
imgLb = cv2.copyMakeBorder(imgL, top=0, bottom=0, left=np.uint16(minDisparities), right=0, borderType= cv2.BORDER_CONSTANT, value=[155,155,155] )
else:
imgLb = cv2.copyMakeBorder(imgL, top=0, bottom=0, left=0, right=np.uint16(minDisparities), borderType= cv2.BORDER_CONSTANT, value=[155,155,155] )
filteredImg = wls_filter.filter(displ, imgLb, None, dispr)
filteredImg = filteredImg * rez
filteredImg = filteredImg + (w5.get()-100)
filteredImg = (w6.get()/10.0)*(filteredImg - 128) + 128
filteredImg = np.clip(filteredImg, 0, 255)
filteredImg = np.uint8(filteredImg)
if (precisealignmenthack == '0'):
if (oneeightysetting=='0'):
filteredImg = cv2.resize(filteredImg,(width,int(height/2)), interpolation = cv2.INTER_CUBIC) # Disparity truncation hack
filteredImg = filteredImg[0:height, np.uint16(minDisparities/rez):width] #
filteredImg = cv2.resize(filteredImg,(width,int(height/2)), interpolation = cv2.INTER_CUBIC) # Disparity truncation hack
else:
filteredImg = cv2.resize(filteredImg,(int(width/2), height), interpolation = cv2.INTER_CUBIC)
filteredImg = filteredImg[0:height, np.uint16(minDisparities/rez):width]
filteredImg = cv2.resize(filteredImg,(int(width/2), height), interpolation = cv2.INTER_CUBIC)
else:
imgL2 = cv2.flip(imgL, 1)
imgR2 = cv2.flip(imgR, 1)
resL2 = cv2.flip(resL, 1)
resR2 = cv2.flip(resR, 1)
left_matcher2 = cv2.StereoSGBM_create( # Another disparity truncation hack
minDisparity=0,
numDisparities=minDisparities, # max_disp has to be dividable by 16 f. E. HH 192, 256
blockSize= w1.get(),
P1=8 * 3 * window_size ** 2, # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely
P2=32 * 3 * window_size ** 2,
disp12MaxDiff=1,
uniquenessRatio=15,
speckleWindowSize=0,
speckleRange=2,
preFilterCap= w3.get(),
mode=cv2.STEREO_SGBM_MODE_HH
)
right_matcher2 = cv2.ximgproc.createRightMatcher(left_matcher2)
wls_filter2 = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher2)
wls_filter2.setLambda(lmbda)
wls_filter2.setSigmaColor(sigma)
displ2 = left_matcher2.compute(resR2, resL2)
dispr2 = right_matcher2.compute(resL2, resR2)
if (RightEye == False):
imgLb2 = cv2.copyMakeBorder(imgL2, top=0, bottom=0, left=np.uint16(minDisparities), right=0, borderType= cv2.BORDER_CONSTANT, value=[155,155,155] )
else:
imgLb2 = cv2.copyMakeBorder(imgL2, top=0, bottom=0, left=0, right=np.uint16(minDisparities), borderType= cv2.BORDER_CONSTANT, value=[155,155,155] )
                filteredImg2 = wls_filter2.filter(displ2, imgLb2, None, dispr2)
filteredImg2 = filteredImg2 * rez
filteredImg2 = filteredImg2 + (w5.get()-100)
filteredImg2 = (w6.get()/10.0)*(filteredImg2 - 128) + 128
filteredImg2 = np.clip(filteredImg2, 0, 255)
filteredImg2 = np.uint8(filteredImg2)
filteredImg2 = cv2.flip(filteredImg2, 1)
M = np.float32([[1,0,-16],[0,1,0]])
if (oneeightysetting=='0'):
filteredImg = cv2.warpAffine(filteredImg, M, (width, int(height/2)), borderMode=cv2.BORDER_WRAP)
filteredImg2 = cv2.warpAffine(filteredImg2, M, (width, int(height/2)), borderMode=cv2.BORDER_WRAP)
filteredImg2 = filteredImg2[0:height, 0:int(width/10)]
filteredImg = filteredImg[0:height, int(width/10):width]
filteredImg = np.concatenate((filteredImg2, filteredImg), axis=1)
else:
filteredImg = cv2.warpAffine(filteredImg, M, (int(width/2), height), borderMode=cv2.BORDER_WRAP)
filteredImg2 = cv2.warpAffine(filteredImg2, M, (int(width/2), height), borderMode=cv2.BORDER_WRAP)
filteredImg2 = filteredImg2[0:height, 0:int(width/20)]
filteredImg = filteredImg[0:height, int(width/20):int(width/2)]
filteredImg = np.concatenate((filteredImg2, filteredImg), axis=1)
filteredImg = cv2.resize(filteredImg,(int(width/2), height), interpolation = cv2.INTER_CUBIC)
cv2.imshow('Left Source', imgL)
cv2.imshow('Right Source', imgR)
cv2.namedWindow('Depth Map', cv2.WINDOW_NORMAL)
cv2.imshow('Depth Map', filteredImg)
settings.title(titleStr)
return filteredImg
else:
print ('Resolution must be greater than 0.')
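# Batch worker: pulls (image, filename) pairs off the queue, splits the frame
# into eye views, computes the filtered disparity map with the slider values
# captured when the batch started, and writes either a depth map (savefile == 1)
# or a stacked colour + depth "6DoF" frame (savefile == 2) to batchpathname.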
def ThreadedCalculate(q, w0val, w1val, w2val, w3val, w4val, w5val, w6val, w7val, savefile):
while True:
img, filename = q.get()
height, width = img.shape[:2]
if (oneeightysetting=='0'):
imgL = img[0:int((height/2)), 0:width]
imgR = img[int((height/2)):height, 0:width]
            offsetValue = ((w7val - 50)/100)*width
Q = np.float32([[1,0, offsetValue],[0,1,0]])
img = cv2.warpAffine(img, Q, (width, int(height/2)), borderMode=cv2.BORDER_WRAP)
imgL = cv2.warpAffine(imgL, Q, (width, int(height/2)), borderMode=cv2.BORDER_WRAP)
imgR = cv2.warpAffine(imgR, Q, (width, int(height/2)), borderMode=cv2.BORDER_WRAP)
else:
imgL = img[0:height, 0:int((width/2))]
imgR = img[0:height, int((width/2)):width]
Q = np.float32([[1,0,0],[0,1,0]])
img = cv2.warpAffine(img, Q, (int(width/2), height), borderMode=cv2.BORDER_WRAP)
imgL = cv2.warpAffine(imgL, Q, (int(width/2), height), borderMode=cv2.BORDER_WRAP)
imgR = cv2.warpAffine(imgR, Q, (int(width/2), height), borderMode=cv2.BORDER_WRAP)
minDisparities=16
window_size = w2val # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely
rez = w0val / 20.0
if (rez > 0):
resL = cv2.resize(imgL,None,fx=rez, fy=rez, interpolation = cv2.INTER_AREA)
resR = cv2.resize(imgR,None,fx=rez, fy=rez, interpolation = cv2.INTER_AREA)
left_matcher = cv2.StereoSGBM_create(
minDisparity=0,
numDisparities=minDisparities, # max_disp has to be dividable by 16 f. E. HH 192, 256
blockSize= w1val,
P1=8 * 3 * window_size ** 2, # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely
P2=32 * 3 * window_size ** 2,
disp12MaxDiff=1,
uniquenessRatio=15,
speckleWindowSize=0,
speckleRange=2,
preFilterCap= w3val,
mode=cv2.STEREO_SGBM_MODE_HH
)
right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
lmbda = w4val * 1000
sigma = 1.2
visual_multiplier = 1
wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
wls_filter.setLambda(lmbda)
wls_filter.setSigmaColor(sigma)
displ = left_matcher.compute(resL, resR)
dispr = right_matcher.compute(resR, resL)
imgLb = cv2.copyMakeBorder(imgL, top=0, bottom=0, left=np.uint16(minDisparities), right=0, borderType= cv2.BORDER_CONSTANT, value=[155,155,155] )
filteredImg = wls_filter.filter(displ, imgLb, None, dispr)
filteredImg = filteredImg * rez
filteredImg = filteredImg + (w5val-100)
filteredImg = (w6val/10.0)*(filteredImg - 128) + 128
filteredImg = np.clip(filteredImg, 0, 255)
filteredImg = np.uint8(filteredImg)
if (stereodepthsetting == '1' and savefile == 1):
imgLb2 = cv2.copyMakeBorder(imgL, top=0, bottom=0, left=0, right=np.uint16(minDisparities), borderType= cv2.BORDER_CONSTANT, value=[155,155,155] )
filteredImgRight = wls_filter.filter(displ, imgLb2, None, dispr)
filteredImgRight = filteredImgRight * rez
filteredImgRight = filteredImgRight + (w5val-100)
filteredImgRight = (w6val/10.0)*(filteredImgRight - 128) + 128
                filteredImgRight = np.clip(filteredImgRight, 0, 255)
filteredImgRight = np.uint8(filteredImgRight)
filteredImg = np.concatenate((filteredImg, filteredImgRight), axis=0)
if (precisealignmenthack == '0'):
if (oneeightysetting=='0'):
filteredImg = cv2.resize(filteredImg,(width,int(height/2)), interpolation = cv2.INTER_CUBIC) # Disparity truncation hack
filteredImg = filteredImg[0:height, np.uint16(minDisparities/rez):width] #
filteredImg = cv2.resize(filteredImg,(width,int(height/2)), interpolation = cv2.INTER_CUBIC) # Disparity truncation hack
else:
filteredImg = cv2.resize(filteredImg,(int(width/2), height), interpolation = cv2.INTER_CUBIC)
filteredImg = filteredImg[0:height, np.uint16(minDisparities/rez):width]
filteredImg = cv2.resize(filteredImg,(int(width/2), height), interpolation = cv2.INTER_CUBIC)
else:
imgL2 = cv2.flip(imgL, 1)
imgR2 = cv2.flip(imgR, 1)
resL2 = cv2.flip(resL, 1)
resR2 = cv2.flip(resR, 1)
left_matcher2 = cv2.StereoSGBM_create( # Another disparity truncation hack
minDisparity=0,
numDisparities=minDisparities, # max_disp has to be dividable by 16 f. E. HH 192, 256
blockSize= w1val,
P1=8 * 3 * window_size ** 2, # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely
P2=32 * 3 * window_size ** 2,
disp12MaxDiff=1,
uniquenessRatio=15,
speckleWindowSize=0,
speckleRange=2,
preFilterCap= w3val,
mode=cv2.STEREO_SGBM_MODE_HH
)
right_matcher2 = cv2.ximgproc.createRightMatcher(left_matcher2)
wls_filter2 = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher2)
wls_filter2.setLambda(lmbda)
wls_filter2.setSigmaColor(sigma)
displ2 = left_matcher2.compute(resR2, resL2)
dispr2 = right_matcher2.compute(resL2, resR2)
imgLb2 = cv2.copyMakeBorder(imgL2, top=0, bottom=0, left=np.uint16(minDisparities), right=0, borderType= cv2.BORDER_CONSTANT, value=[155,155,155] )
                filteredImg2 = wls_filter2.filter(displ2, imgLb2, None, dispr2)
filteredImg2 = filteredImg2 * rez
filteredImg2 = filteredImg2 + (w5val-100)
filteredImg2 = (w6val/10.0)*(filteredImg2 - 128) + 128
filteredImg2 = np.clip(filteredImg2, 0, 255)
filteredImg2 = np.uint8(filteredImg2)
filteredImg2 = cv2.flip(filteredImg2, 1)
if (stereodepthsetting == '1' and savefile == 1):
imgLb2a = cv2.copyMakeBorder(imgL, top=0, bottom=0, left=0, right=np.uint16(minDisparities), borderType= cv2.BORDER_CONSTANT, value=[155,155,155] )
                    filteredImgRight2 = wls_filter2.filter(displ2, imgLb2a, None, dispr2)
filteredImgRight2 = filteredImgRight2 * rez
filteredImgRight2 = filteredImgRight2 + (w5val-100)
filteredImgRight2 = (w6val/10.0)*(filteredImgRight2 - 128) + 128
filteredImgRight2 = np.clip(filteredImgRight2, 0, 255)
filteredImgRight2 = np.uint8(filteredImgRight2)
filteredImgRight2 = cv2.flip(filteredImgRight2, 1)
filteredImg2 = np.concatenate((filteredImg2, filteredImgRight2), axis=0)
M = np.float32([[1,0,-16],[0,1,0]])
if (oneeightysetting=='0'):
if (stereodepthsetting == '0' or savefile == 2):
filteredImg = cv2.warpAffine(filteredImg, M, (width, int(height/2)), borderMode=cv2.BORDER_WRAP)
filteredImg2 = cv2.warpAffine(filteredImg2, M, (width, int(height/2)), borderMode=cv2.BORDER_WRAP)
else:
filteredImg = cv2.warpAffine(filteredImg, M, (width, int(height)), borderMode=cv2.BORDER_WRAP)
filteredImg2 = cv2.warpAffine(filteredImg2, M, (width, int(height)), borderMode=cv2.BORDER_WRAP)
filteredImg2 = filteredImg2[0:height, 0:int(width/10)]
filteredImg = filteredImg[0:height, int(width/10):width]
filteredImg = np.concatenate((filteredImg2, filteredImg), axis=1)
else:
filteredImg = cv2.warpAffine(filteredImg, M, (int(width/2), height), borderMode=cv2.BORDER_WRAP)
filteredImg2 = cv2.warpAffine(filteredImg2, M, (int(width/2), height), borderMode=cv2.BORDER_WRAP)
filteredImg2 = filteredImg2[0:height, 0:int(width/20)]
filteredImg = filteredImg[0:height, int(width/20):int(width/2)]
filteredImg = np.concatenate((filteredImg2, filteredImg), axis=1)
filteredImg = cv2.resize(filteredImg,(int(width/2), height), interpolation = cv2.INTER_CUBIC)
#filteredImg = cv2.resize(filteredImg,(width,int(height/2)), interpolation = cv2.INTER_CUBIC)
dispthread = Thread(target=threadDisplay, args=(filteredImg,imgL,imgR))
dispthread.start()
if (savefile == 1):
if (savefiletype == 'JPEG'):
cv2.imwrite(batchpathname + '/' + filename + '.jpg', filteredImg, [cv2.IMWRITE_JPEG_QUALITY, jpegquality])
elif (savefiletype == 'PNG'):
cv2.imwrite(batchpathname + '/' + filename + '.png', filteredImg)
elif (savefiletype == 'TIFF'):
cv2.imwrite(batchpathname + '/' + filename + '.tif', filteredImg)
elif (savefile == 2):
filteredImg = cv2.cvtColor(filteredImg, cv2.COLOR_GRAY2RGB)
dof = np.concatenate((imgL, filteredImg), axis=0)
if (oneeightysetting=='1'):
border = int(((height*2)-(width/2))/2)
dof = cv2.copyMakeBorder(dof, 0, 0, border, border, cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
if (savefiletype == 'JPEG'):
cv2.imwrite(batchpathname + '/' + filename + '.jpg', dof, [cv2.IMWRITE_JPEG_QUALITY, jpegquality])
elif (savefiletype == 'PNG'):
cv2.imwrite(batchpathname + '/' + filename + '.png', dof)
elif (savefiletype == 'TIFF'):
cv2.imwrite(batchpathname + '/' + filename + '.tif', dof)
else:
print ('Resolution must be greater than 0.')
q.task_done()
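# File > Open: load a single image or video, split it into left/right eye views
# according to the VR180 setting, compute a preview, and (for videos) show the
# seek window with in/out controls.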
def openfile():
global currentfile, currentdirectory, img, height, width, imgL, imgR, titleStr, pathname, filename, files, currentfiletype, seekwindow, seekslider, framecount, InFrame, OutFrame, setInText, setOutText, durationtText
del files[:]
currentdirectory = ''
currentfile = filedialog.askopenfilename()
pathname = os.path.dirname(currentfile)
exttype = os.path.splitext(os.path.basename(currentfile))[1]
if (currentfile != ''):
if (exttype == '.mp4' or exttype == '.mov' or exttype == '.webm'):
currentfiletype = 'video'
cap = cv2.VideoCapture(currentfile)
ret,img = cap.read()
framecount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1
height, width = img.shape[:2]
if (oneeightysetting=='0'):
imgL = img[0:int((height/2)), 0:width]
imgR = img[int((height/2)):height, 0:width]
cv2.resizeWindow('Depth Map', 800,400)
# cv2.moveWindow('Depth Map', 580,225);
cv2.namedWindow('Left Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Left Source', 250,125)
cv2.moveWindow('Left Source', 580,65);
cv2.namedWindow('Right Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Right Source', 250,125)
cv2.moveWindow('Right Source', 830,65);
else:
imgL = img[0:height, 0:int((width/2))]
imgR = img[0:height, int((width/2)):width]
cv2.resizeWindow('Depth Map', 400,400)
# cv2.moveWindow('Depth Map', 580,225);
cv2.namedWindow('Left Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Left Source', 125,125)
cv2.moveWindow('Left Source', 580,65);
cv2.namedWindow('Right Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Right Source', 125,125)
cv2.moveWindow('Right Source', 705,65);
cv2.namedWindow('Left Source', cv2.WINDOW_NORMAL)
cv2.namedWindow('Right Source', cv2.WINDOW_NORMAL)
titleStr = 'Stereo2Depth [Batching ' + str(framecount) + ' frames]'
Calculate()
try:
seekwindow.deiconify()
except:
seekwindow = Tk()
seekwindow.title('Seek')
seekwindow.geometry('520x80+720+660')
seekslider = Scale(seekwindow, from_=1, to=100, orient=HORIZONTAL, length=500)
seekslider.bind('<ButtonRelease-1>', updateValue)
seekslider.grid(row=0,column=0,padx=5)
InCanvas = Canvas(seekwindow)
InCanvas.grid(row=1, column=0, padx=6, pady=8, sticky=W)
setInButton = Button(InCanvas, text='Set In', width=5, command=lambda:setinout(True))
setInButton.grid(row=0,column=0, sticky=W)
setInButton.configure(background='white')
setInText = Label(InCanvas, text='In Frame: ')
setInText.grid(row=0,column=1,padx=10,sticky=W)
OutCanvas = Canvas(seekwindow)
OutCanvas.grid(row=1, column=0, padx=6, pady=8, sticky=E)
setOutText = Label(OutCanvas, text='Out Frame: ')
setOutText.grid(row=0,column=2, padx=10)
setOutButton = Button(OutCanvas, text='Set Out', width=5, command=lambda:setinout(False))
setOutButton.grid(row=0,column=3)
setOutButton.configure(background='white')
durationtText = Label(seekwindow, justify=CENTER, text=' ')
durationtText.grid(row=1,column=0, padx=10)
seekslider = Scale(seekwindow, from_=1, to=framecount, orient=HORIZONTAL, length=500)
seekslider.bind('<ButtonRelease-1>', updateValue)
seekslider.grid(row=0,column=0,padx=5)
seekslider.set(1)
InFrame = 1
OutFrame = framecount
setInText.config(text='In Frame: ' + str(InFrame))
setOutText.config(text='Out Frame: ' + str(OutFrame))
durationtText.config(text=str(OutFrame - InFrame + 1) + ' frames')
elif (exttype == '.jpg' or exttype == '.jpeg' or exttype == '.png'):
currentfiletype = 'image'
filename = os.path.splitext(os.path.basename(currentfile))[0]
img = cv2.imread(currentfile)
height, width = img.shape[:2]
if (oneeightysetting=='0'):
imgL = img[0:int((height/2)), 0:width]
imgR = img[int((height/2)):height, 0:width]
cv2.resizeWindow('Depth Map', 800,400)
# cv2.moveWindow('Depth Map', 580,225);
cv2.namedWindow('Left Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Left Source', 250,125)
cv2.moveWindow('Left Source', 580,65);
cv2.namedWindow('Right Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Right Source', 250,125)
cv2.moveWindow('Right Source', 830,65);
else:
imgL = img[0:height, 0:int((width/2))]
imgR = img[0:height, int((width/2)):width]
cv2.resizeWindow('Depth Map', 400,400)
# cv2.moveWindow('Depth Map', 580,225);
cv2.namedWindow('Left Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Left Source', 125,125)
cv2.moveWindow('Left Source', 580,65);
cv2.namedWindow('Right Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Right Source', 125,125)
cv2.moveWindow('Right Source', 705,65);
cv2.namedWindow('Left Source', cv2.WINDOW_NORMAL)
cv2.namedWindow('Right Source', cv2.WINDOW_NORMAL)
titleStr = 'Stereo2Depth'
seekwindow.withdraw()
Calculate()
else:
print ('Unrecognized file type')
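# File > Open Folder: collect every supported image in the chosen directory for
# batch processing, preview the first frame, and show the seek window with
# in/out controls.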
def openfolder():
global currentfile, currentdirectory, img, height, width, imgL, imgR, titleStr, pathname, filename, files, currentfiletype, seekwindow, seekslider, InFrame, OutFrame, setInText, setOutText, durationtText
del files[:]
currentfiletype = 'image'
currentdirectory = filedialog.askdirectory()
pathname = currentdirectory
extensions = ('*.jpg', '*.jpeg', '*.png', '*.tif', '*.tiff')
for extension in extensions:
files.extend(glob.glob(currentdirectory + '/' + extension))
print ('Batching ' + str(len(files)) + ' frames')
if (len(files) > 0):
currentfile = files[0]
filename = os.path.splitext(os.path.basename(currentfile))[0]
img = cv2.imread(currentfile)
height, width = img.shape[:2]
if (oneeightysetting=='0'):
imgL = img[0:int((height/2)), 0:width]
imgR = img[int((height/2)):height, 0:width]
cv2.resizeWindow('Depth Map', 800,400)
# cv2.moveWindow('Depth Map', 580,225);
cv2.namedWindow('Left Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Left Source', 250,125)
cv2.moveWindow('Left Source', 580,65);
cv2.namedWindow('Right Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Right Source', 250,125)
cv2.moveWindow('Right Source', 830,65);
else:
imgL = img[0:height, 0:int((width/2))]
imgR = img[0:height, int((width/2)):width]
cv2.resizeWindow('Depth Map', 400,400)
# cv2.moveWindow('Depth Map', 580,225);
cv2.namedWindow('Left Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Left Source', 125,125)
cv2.moveWindow('Left Source', 580,65);
cv2.namedWindow('Right Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Right Source', 125,125)
cv2.moveWindow('Right Source', 705,65);
cv2.namedWindow('Left Source', cv2.WINDOW_NORMAL)
cv2.namedWindow('Right Source', cv2.WINDOW_NORMAL)
titleStr = 'Stereo2Depth [Batching ' + str(len(files)) + ' files]'
Calculate()
try:
seekwindow.deiconify()
except:
seekwindow = Tk()
seekwindow.title('Seek')
seekwindow.geometry('520x80+720+660')
seekslider = Scale(seekwindow, from_=1, to=100, orient=HORIZONTAL, length=500)
seekslider.bind('<ButtonRelease-1>', updateValue)
seekslider.grid(row=0,column=0,padx=5)
InCanvas = Canvas(seekwindow)
InCanvas.grid(row=1, column=0, padx=6, pady=8, sticky=W)
setInButton = Button(InCanvas, text='Set In', width=5, command=lambda:setinout(True))
setInButton.grid(row=0,column=0, sticky=W)
setInButton.configure(background='white')
setInText = Label(InCanvas, text='In Frame: ')
setInText.grid(row=0,column=1,padx=10,sticky=W)
OutCanvas = Canvas(seekwindow)
OutCanvas.grid(row=1, column=0, padx=6, pady=8, sticky=E)
setOutText = Label(OutCanvas, text='Out Frame: ')
setOutText.grid(row=0,column=2, padx=10)
setOutButton = Button(OutCanvas, text='Set Out', width=5, command=lambda:setinout(False))
setOutButton.grid(row=0,column=3)
setOutButton.configure(background='white')
durationtText = Label(seekwindow, justify=CENTER, text=' ')
durationtText.grid(row=1,column=0, padx=10)
seekslider = Scale(seekwindow, from_=1, to=len(files), orient=HORIZONTAL, length=500)
seekslider.bind('<ButtonRelease-1>', updateValue)
seekslider.grid(row=0,column=0,padx=5)
seekslider.set(1)
InFrame = 1
OutFrame = len(files)
setInText.config(text='In Frame: ' + str(InFrame))
setOutText.config(text='Out Frame: ' + str(OutFrame))
durationtText.config(text=str(OutFrame - InFrame + 1) + ' frames')
else:
titleStr = 'Stereo2Depth'
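# Seek-slider callback: re-read the selected frame (video) or file (image
# sequence), re-split it into eye views, and recompute the preview.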
def updateValue(event):
global seekslider, currentfile, framecount, img, height, width, imgL, imgR, currentfiletype, files
if (currentfiletype == 'video'):
cap = cv2.VideoCapture(currentfile)
cap.set(cv2.CAP_PROP_POS_FRAMES,seekslider.get()-1);
ret, img = cap.read()
elif (currentfiletype == 'image'):
try:
img = cv2.imread(files[seekslider.get()-1])
except:
img = cv2.imread(currentfile)
else:
return
height, width = img.shape[:2]
if (oneeightysetting=='0'):
imgL = img[0:int((height/2)), 0:width]
imgR = img[int((height/2)):height, 0:width]
offsetValue = ((w7.get() - 50)/100)*width
Q = np.float32([[1,0, offsetValue],[0,1,0]])
img = cv2.warpAffine(img, Q, (width, int(height/2)), borderMode=cv2.BORDER_WRAP)
imgL = cv2.warpAffine(imgL, Q, (width, int(height/2)), borderMode=cv2.BORDER_WRAP)
imgR = cv2.warpAffine(imgR, Q, (width, int(height/2)), borderMode=cv2.BORDER_WRAP)
cv2.resizeWindow('Depth Map', 800,400)
# cv2.moveWindow('Depth Map', 580,225);
cv2.namedWindow('Left Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Left Source', 250,125)
cv2.moveWindow('Left Source', 580,65);
cv2.namedWindow('Right Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Right Source', 250,125)
cv2.moveWindow('Right Source', 830,65);
else:
imgL = img[0:height, 0:int((width/2))]
imgR = img[0:height, int((width/2)):width]
Q = np.float32([[1,0,0],[0,1,0]])
img = cv2.warpAffine(img, Q, (int(width/2), height), borderMode=cv2.BORDER_WRAP)
imgL = cv2.warpAffine(imgL, Q, (int(width/2), height), borderMode=cv2.BORDER_WRAP)
imgR = cv2.warpAffine(imgR, Q, (int(width/2), height), borderMode=cv2.BORDER_WRAP)
cv2.resizeWindow('Depth Map', 400,400)
# cv2.moveWindow('Depth Map', 580,225);
cv2.namedWindow('Left Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Left Source', 125,125)
cv2.moveWindow('Left Source', 580,65);
cv2.namedWindow('Right Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Right Source', 125,125)
cv2.moveWindow('Right Source', 705,65);
Calculate()
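# Preview helper started on its own thread from ThreadedCalculate; wrapped in
# try/except so display errors never stall the batch workers.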
def threadDisplay(depthmap, imgl, imgr):
try:
cv2.imshow('Depth Map', depthmap)
cv2.imshow('Left Source', imgl)
cv2.imshow('Right Source', imgr)
except:
pass
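# Export entry point. batch == 0 saves just the current frame; batch == 1 runs a
# multi-threaded batch export over the loaded image sequence or video.
# savefile == 1 writes depth maps, savefile == 2 writes stacked colour + depth
# ("6DoF") frames.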
def SaveFile(savefile, batch):
global currentfile, currentdirectory, img, height, width, imgL, imgR, pathname, filename, files, batchpathname, currentfiletype, abort, InFrame, OutFrame, RightEye
if (batch == 0 and savefile != 0 and currentfile != ''):
filename = os.path.splitext(os.path.basename(currentfile))[0]
# //come back here
thedepth = Calculate()
if (stereodepthsetting == '1' and savefile == 1):
RightEye = True
thedepthright = Calculate()
RightEye = False
thedepth = np.concatenate((thedepth, thedepthright), axis=0)
if (savefile == 1):
if (savefiletype == 'JPEG'):
cv2.imwrite(pathname + '/' + filename + '_depthmap.jpg', thedepth, [cv2.IMWRITE_JPEG_QUALITY, jpegquality])
print ('Saved: ' + pathname + '/' + filename + '_depthmap.jpg\a')
elif (savefiletype == 'PNG'):
cv2.imwrite(pathname + '/' + filename + '_depthmap.png', thedepth)
                print ('Saved: ' + pathname + '/' + filename + '_depthmap.png\a')
elif (savefiletype == 'TIFF'):
cv2.imwrite(pathname + '/' + filename + '_depthmap.tif', thedepth)
print ('Saved: ' + pathname + '/' + filename + '_depthmap.tif\a')
elif (savefile == 2):
thedepth = cv2.cvtColor(thedepth, cv2.COLOR_GRAY2RGB)
dof = np.concatenate((imgL, thedepth), axis=0)
if (oneeightysetting=='1'):
border = int(((height*2)-(width/2))/2)
dof = cv2.copyMakeBorder(dof, 0, 0, border, border, cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
if (savefiletype == 'JPEG'):
cv2.imwrite(pathname + '/' + filename + '_6DoF.jpg', dof, [cv2.IMWRITE_JPEG_QUALITY, jpegquality])
print ('Saved: ' + pathname + '/' + filename + '_6DoF.jpg\a')
elif (savefiletype == 'PNG'):
cv2.imwrite(pathname + '/' + filename + '_6DoF.png', dof)
print ('Saved: ' + pathname + '/' + filename + '_6DoF.png\a')
elif (savefiletype == 'TIFF'):
cv2.imwrite(pathname + '/' + filename + '_6DoF.tif', dof)
print ('Saved: ' + pathname + '/' + filename + '_6DoF.tif\a')
elif (batch == 0):
print ('No file loaded')
if (batch == 1 and len(files) >= 1):
try:
progresswindow.deiconify()
except:
progresswindow = Tk()
progresswindow.title('Progress')
if (os.name == 'nt'):
progresswindow.geometry('520x110+720+660')
else:
progresswindow.geometry('520x100+720+660')
progressText = Label(progresswindow,justify=CENTER, text='File (5/10) -- 50%')
progressText.grid(row=0,column=0,padx=5,pady=5)
progressBar = ttk.Progressbar(progresswindow, orient='horizontal', length=500, mode='determinate')
progressBar.grid(row=1,column=0,padx=10, pady=10)
cancelButton = Button(progresswindow, text='Cancel Batch Export', width=40, command=cancelsave)
cancelButton.grid(row=2,column=0,padx=10, pady=5)
filename = os.path.splitext(os.path.basename(files[0]))[0]
#filename = re.sub(r'\W+', '', filename)
        filename = unicodedata.normalize('NFKD', filename).encode('ASCII', 'ignore').decode('ASCII')
if (savefile == 1):
batchpathname = pathname + '/' + filename + '_depth'
else:
batchpathname = pathname + '/' + filename + '_6DoF'
if not os.path.exists(batchpathname):
os.makedirs(batchpathname)
print ('Saving to: ' + batchpathname + '/')
starttime = time.time()
q = Queue(maxsize=0)
for i in range(numberofthreads):
worker = Thread(target=ThreadedCalculate, args=(q, w0.get(), w1.get(), w2.get(), w3.get(), w4.get(), w5.get(), w6.get(), w7.get(), savefile))
            worker.daemon = True
worker.start()
index = InFrame-1
abort=False
settings.title(titleStr + ' ( Working. . . )')
lastSave = time.time()
while (index < OutFrame):
currentfile = files[index]
filename = os.path.splitext(os.path.basename(currentfile))[0]
img = cv2.imread(currentfile)
            filename = unicodedata.normalize('NFKD', filename).encode('ASCII', 'ignore').decode('ASCII')
q.put((img, filename))
# print ('%0.2f' % (100 * (index) / len(files)) + '%')
timeperframe = time.time()-lastSave
progressText.config(text = 'File (' + str(index-InFrame+1) + '/' + str((OutFrame-InFrame)+1) + ') -- ' + '%0.2f' % (100 * ((index-InFrame+1) / (OutFrame-InFrame+1))) + '% -- ' + '%0.2f' % (timeperframe * ((OutFrame-InFrame+1) - (index - InFrame)) / 60) + ' minutes left')
progressBar['value'] = 100 * ((index-InFrame+1) / (OutFrame-InFrame+1))
progresswindow.update()
k = cv2.waitKey(1)
if (k==27): # Esc key to stop
abort = True
break
if (abort):
break
index = index + 1
lastSave = time.time()
q.join()
settings.title(titleStr)
progresswindow.withdraw()
if not abort:
print ('Batch export complete in ' + '%0.2f' % (time.time() - starttime) + ' seconds.\a')
else:
print('Batch export aborted after ' + '%0.2f' % (time.time() - starttime) + ' seconds.')
elif (batch == 1 and currentfiletype == 'video'):
try:
progresswindow.deiconify()
except:
progresswindow = Tk()
progresswindow.title('Progress')
if (os.name == 'nt'):
progresswindow.geometry('520x110+720+660')
else:
progresswindow.geometry('520x100+720+660')
progressText = Label(progresswindow,justify=CENTER, text='File (5/10) -- 50%')
progressText.grid(row=0,column=0,padx=5,pady=5)
progressBar = ttk.Progressbar(progresswindow, orient='horizontal', length=500, mode='determinate')
progressBar.grid(row=1,column=0,padx=10, pady=10)
cancelButton = Button(progresswindow, text='Cancel Batch Export', width=40, command=cancelsave)
cancelButton.grid(row=2,column=0,padx=10, pady=5)
filename = os.path.splitext(os.path.basename(currentfile))[0]
# filename = re.sub(r'\W+', '', filename)
        filename = unicodedata.normalize('NFKD', filename).encode('ASCII', 'ignore').decode('ASCII')
if (savefile == 1):
batchpathname = pathname + '/' + filename + '_depth'
else:
batchpathname = pathname + '/' + filename + '_6DoF'
if not os.path.exists(batchpathname):
os.makedirs(batchpathname)
print ('Saving to: ' + batchpathname + '/')
starttime = time.time()
q = Queue(maxsize=0)
for i in range(numberofthreads):
worker = Thread(target=ThreadedCalculate, args=(q, w0.get(), w1.get(), w2.get(), w3.get(), w4.get(), w5.get(), w6.get(), w7.get(), savefile))
worker.daemon = True
worker.start()
index = InFrame-1
cap = cv2.VideoCapture(currentfile)
cap.set(cv2.CAP_PROP_POS_FRAMES,InFrame-1);
filenamebase = os.path.splitext(os.path.basename(currentfile))[0]
numberofdigits = len(str(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1))
abort=False
lastSave = time.time()
while (index < OutFrame):
settings.title(titleStr + ' ( Working. . . )')
ret, img = cap.read()
filename = filenamebase + '_' + str(index+1).zfill(numberofdigits)
            filename = unicodedata.normalize('NFKD', filename).encode('ASCII', 'ignore').decode('ASCII')
q.put((img, filename))
# print ('%0.2f' % (100 * (index) / framecount) + '%')
timeperframe = time.time()-lastSave
progressText.config(text = 'File (' + str(index-InFrame+1) + '/' + str((OutFrame-InFrame)+1) + ') -- ' + '%0.2f' % (100 * ((index-InFrame+1) / (OutFrame-InFrame+1))) + '% -- ' + '%0.2f' % (timeperframe * ((OutFrame-InFrame+1) - (index - InFrame)) / 60) + ' minutes left')
progressBar['value'] = 100 * ((index-InFrame+1) / (OutFrame-InFrame+1))
progresswindow.update()
k = cv2.waitKey(1)
if (k==27): # Esc key to stop
abort = True
break
if (abort):
break
index = index + 1
lastSave = time.time()
q.join()
settings.title(titleStr)
progresswindow.withdraw()
if not abort:
print ('Batch export complete in ' + '%0.2f' % (time.time() - starttime) + ' seconds.\a')
else:
print('Batch export aborted after ' + '%0.2f' % (time.time() - starttime) + ' seconds.')
elif (batch == 1):
print ('No batch loaded')
def autoupdate(value):
global autoupdatebool
if (autoupdatebool.get() == 1):
#Calculate()
updateValue(0)
def cancelsave():
global abort
abort = True
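# Reset every slider to its default value and recompute the preview.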
def defaultSettings():
w0.set(4)
w1.set(5)
w2.set(5)
w3.set(60)
w4.set(80)
w5.set(100)
w6.set(10)
w7.set(50)
settings.update()
Calculate()
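# Copy the advanced-settings widgets back into the module-level settings;
# wrapped in try/except because the callback can fire before all widgets exist.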
def advancedsettings(event):
global threadsslider, savefiletypestring, jpegqualityslider, precisealignmentbool, oneeightybool, stereodepthbool, numberofthreads, savefiletype, jpegquality, precisealignmenthack, oneeightysetting, stereodepthsetting
try:
numberofthreads = threadsslider.get()
savefiletype = savefiletypestring.get()
jpegquality = jpegqualityslider.get()
precisealignmenthack = precisealignmentbool.get()
oneeightysetting = oneeightybool.get()
stereodepthsetting = stereodepthbool.get()
except:
pass
def showadvancedsettings():
global advancedsettingswindow, threadsslider, savefiletypestring, precisealignmentbool, oneeightybool, stereodepthbool, jpegqualityslider, numberofthreads, savefiletype, jpegquality, precisealignmenthack, oneeightysetting, stereodepthsetting
try:
advancedsettingswindow.destroy()
except:
pass
advancedsettingswindow = Tk()
advancedsettingswindow.title('Advanced Settings')
advancedsettingswindow.geometry('450x340+85+350')
advancedsettingsCanvas = Canvas(advancedsettingswindow)
advancedsettingsCanvas.grid(row=0, column=0, padx=40, pady=15)
Label(advancedsettingsCanvas, text='Number of Threads').grid(row=0,column=0,padx=5,sticky=E)
threadsslider = Scale(advancedsettingsCanvas, from_=1, to=100, orient=HORIZONTAL, length=200, command=advancedsettings)
threadsslider.grid(row=0,column=1,padx=5)
threadsslider.set(numberofthreads)
Label(advancedsettingsCanvas, text='Save File Type').grid(row=1,column=0,padx=5,sticky=E)
savefiletypestring = StringVar(advancedsettingsCanvas)
savefiletypestring.set(savefiletype) # default value
savefiletypedropdown = OptionMenu(advancedsettingsCanvas, savefiletypestring, 'JPEG', 'PNG', 'TIFF', command=advancedsettings)
savefiletypedropdown.config(width=15)
savefiletypedropdown.grid(row=1,column=1,pady=15)
Label(advancedsettingsCanvas, text='Jpeg Quality').grid(row=2,column=0,padx=5,sticky=E)
jpegqualityslider = Scale(advancedsettingsCanvas, from_=1, to=100, orient=HORIZONTAL, length=200, command=advancedsettings)
jpegqualityslider.grid(row=2,column=1,padx=5)
jpegqualityslider.set(jpegquality)
Label(advancedsettingsCanvas, text='VR180 Input').grid(row=3,column=0,padx=5,sticky=E)
oneeightybool = StringVar(advancedsettingsCanvas)
oneeightybool.set(oneeightysetting)
oneeightycheck = Checkbutton(advancedsettingsCanvas, variable=oneeightybool, command=lambda:advancedsettings(0))
oneeightycheck.grid(row=3,column=1,pady=15,columnspan=2)
Label(advancedsettingsCanvas, text='Export Stereo Depthmaps').grid(row=4,column=0,padx=5,sticky=E)
stereodepthbool = StringVar(advancedsettingsCanvas)
stereodepthbool.set(stereodepthsetting)
stereodepthcheck = Checkbutton(advancedsettingsCanvas, variable=stereodepthbool, command=lambda:advancedsettings(0))
stereodepthcheck.grid(row=4,column=1,pady=15,columnspan=2)
Label(advancedsettingsCanvas, text='Precise Alignment Hack\n(doubles processing time)').grid(row=5,column=0,padx=5,sticky=E)
precisealignmentbool = StringVar(advancedsettingsCanvas)
precisealignmentbool.set(precisealignmenthack)
precisealignmentcheck = Checkbutton(advancedsettingsCanvas, variable=precisealignmentbool, command=lambda:advancedsettings(0))
#precisealignmentcheck.config(width=15)
precisealignmentcheck.grid(row=5,column=1,pady=15,columnspan=2)
def seekthread(seekto, cap):
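    # Seek by reading and discarding frames one at a time. This is slower than jumping
    # via CAP_PROP_POS_FRAMES, but it stays frame-accurate even with codecs where
    # direct seeking is unreliable.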
x = 0
while (x + 1 < seekto):
cap.read()
x = x + 1
print ('Seeking -- ' + '%0.2f' % (100 * x / seekto) + '%')
def setinout(setin):
global InFrame, OutFrame, setInText, setOutText, titleStr
framenumber = seekslider.get()
if (setin == True):
setInText.config(text='In Frame: ' + str(framenumber))
InFrame = framenumber
else:
setOutText.config(text='Out Frame: ' + str(framenumber))
OutFrame = framenumber
if (InFrame <= OutFrame):
titleStr = 'Stereo2Depth [Batching ' + str(OutFrame - InFrame + 1) + ' frames]'
durationtText.config(text=str(OutFrame - InFrame + 1) + ' frames')
else:
titleStr = 'Stereo2Depth [In/Out Frame Error]'
durationtText.config(text='ERROR')
print ('ERROR: In Frame must be before Out Frame')
settings.title(titleStr)
def VersionCheck():
try:
with urllib.request.urlopen('http://pseudoscience.pictures/stereo2depth/latestversion.txt') as response:
            html = response.read().decode('utf-8', 'ignore').strip()
theversion = sys.argv[0]
# theversion = theversion[-6:]
theversion = theversion[-8:]
        if theversion.endswith('.py'):
            theversion = theversion[:-3]
if (html != theversion):
print ('New version available! Check the sidebar at reddit.com/r/6DoF to download Stereo2Depth version ' + html)
messagebox.showwarning(
'Update Available',
'New version available! Check the sidebar at reddit.com/r/6DoF to download Stereo2Depth version ' + html
)
return
except:
pass
def saveSettings():
global pathname, filename, numberofthreads, savefiletype, jpegquality, oneeightysetting, stereodepthsetting, precisealignmenthack
settings.update()
advancedsettings(0)
settingsstring = str(w0.get()) + ',' + str(w1.get()) + ',' + str(w2.get()) + ',' + str(w3.get()) + ',' + str(w4.get()) + ',' + str(w5.get()) + ',' + str(w6.get()) + ',' + str(w7.get()) + ',' + str(numberofthreads) + "," + str(savefiletype) + "," + str(jpegquality) + "," + str(oneeightysetting) + "," + str(stereodepthsetting) + "," + str(precisealignmenthack)
try:
filename = os.path.splitext(os.path.basename(currentfile))[0]
settingssavepath = pathname + '/' + filename + '.s2d'
except:
settingsfile = filedialog.asksaveasfilename()
settingssavepath = settingsfile + '.s2d'
f = open(settingssavepath,'w')
f.write(settingsstring)
f.close()
def loadSettings():
global numberofthreads, savefiletype, jpegquality, oneeightysetting, stereodepthsetting, precisealignmenthack
settingsfile = filedialog.askopenfilename()
with open(settingsfile, "r") as filestream:
for line in filestream:
currentline = line.split(",")
#total = str(int(currentline[0]) + int(currentline[1]) + int(currentline [2])) + "\n"
w0.set(int(currentline[0]))
w1.set(int(currentline[1]))
w2.set(int(currentline[2]))
w3.set(int(currentline[3]))
w4.set(int(currentline[4]))
w5.set(int(currentline[5]))
w6.set(int(currentline[6]))
w7.set(int(currentline[7]))
numberofthreads = int(currentline[8])
savefiletype = currentline[9]
jpegquality = int(currentline[10])
oneeightysetting = int(currentline[11])
stereodepthsetting = int(currentline[12])
precisealignmenthack = int(currentline[13])
# settings.update()
showadvancedsettings()
advancedsettings(0)
updateValue(0)
advancedsettingswindow.withdraw()
currentfile = ''
currentfiletype = ''
titleStr = 'Stereo2Depth'
files = []
settings = Tk()
cv2.namedWindow('Depth Map', cv2.WINDOW_NORMAL)
cv2.namedWindow('Depth Map', cv2.WINDOW_AUTOSIZE)
cv2.resizeWindow('Depth Map', 800, 400)
cv2.moveWindow('Depth Map', 580, 225)
cv2.namedWindow('Left Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Left Source', 250, 125)
cv2.moveWindow('Left Source', 580, 65)
cv2.namedWindow('Right Source', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Right Source', 250, 125)
cv2.moveWindow('Right Source', 830, 65)
autoupdatebool = IntVar()
settings.title(titleStr)
if (os.name == 'nt'):
settings.geometry('520x670+50+65')
else:
settings.geometry('520x600+50+65')
settings.columnconfigure(0, weight=1)
settings.columnconfigure(1, weight=1)
seekwindow = Tk()
seekwindow.title('Seek')
seekwindow.geometry('520x80+720+660')
seekslider = Scale(seekwindow, from_=1, to=100, orient=HORIZONTAL, length=500)
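# Recompute the preview only when the user releases the slider, so frames are not
# re-decoded on every tick while dragging.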
seekslider.bind('<ButtonRelease-1>', updateValue)
seekslider.grid(row=0,column=0,padx=5)
InCanvas = Canvas(seekwindow)
InCanvas.grid(row=1, column=0, padx=6, pady=8, sticky=W)
setInButton = Button(InCanvas, text='Set In', width=5, command=lambda:setinout(True))
setInButton.grid(row=0,column=0, sticky=W)
setInButton.configure(background='white')
setInText = Label(InCanvas, text='In Frame: ')
setInText.grid(row=0,column=1,padx=10,sticky=W)
OutCanvas = Canvas(seekwindow)
OutCanvas.grid(row=1, column=0, padx=6, pady=8, sticky=E)
setOutText = Label(OutCanvas, text='Out Frame: ')
setOutText.grid(row=0,column=0, padx=10)
setOutButton = Button(OutCanvas, text='Set Out', width=5, command=lambda:setinout(False))
setOutButton.grid(row=0,column=1)
setOutButton.configure(background='white')
durationtText = Label(seekwindow, justify=CENTER, text=' ')
durationtText.grid(row=1,column=0, padx=10)
seekwindow.withdraw()
advancedsettingswindow = Tk()
advancedsettingswindow.title('Advanced Settings')
advancedsettingswindow.geometry('450x340+85+350')
advancedsettingsCanvas = Canvas(advancedsettingswindow)
advancedsettingsCanvas.grid(row=0, column=0, padx=40, pady=15)
Label(advancedsettingsCanvas, text='Number of Threads').grid(row=0,column=0,padx=5,sticky=E)
threadsslider = Scale(advancedsettingsCanvas, from_=1, to=100, orient=HORIZONTAL, length=200)
threadsslider.grid(row=0,column=1,padx=5)
threadsslider.set(20)
Label(advancedsettingsCanvas, text='Save File Type').grid(row=1,column=0,padx=5,sticky=E)
savefiletypestring = StringVar(advancedsettingsCanvas)
savefiletypestring.set('JPEG') # default value
savefiletypedropdown = OptionMenu(advancedsettingsCanvas, savefiletypestring, 'JPEG', 'PNG', 'TIFF')
savefiletypedropdown.config(width=15)
savefiletypedropdown.grid(row=1,column=1,pady=15)
Label(advancedsettingsCanvas, text='Jpeg Quality').grid(row=2,column=0,padx=5,sticky=E)
jpegqualityslider = Scale(advancedsettingsCanvas, from_=1, to=100, orient=HORIZONTAL, length=200)
jpegqualityslider.grid(row=2,column=1,padx=5)
jpegqualityslider.set(100)
Label(advancedsettingsCanvas, text='VR180 Input').grid(row=3,column=0,padx=5,sticky=E)
oneeightybool = StringVar(advancedsettingsCanvas)
oneeightycheck = Checkbutton(advancedsettingsCanvas, variable=oneeightybool, command=lambda:advancedsettings(0))
oneeightycheck.grid(row=3,column=1,pady=15,columnspan=2)
Label(advancedsettingsCanvas, text='Export Stereo Depthmaps').grid(row=4,column=0,padx=5,sticky=E)
stereodepthbool = StringVar(advancedsettingsCanvas)
stereodepthcheck = Checkbutton(advancedsettingsCanvas, variable=stereodepthbool, command=lambda:advancedsettings(0))
stereodepthcheck.grid(row=4,column=1,pady=15,columnspan=2)
Label(advancedsettingsCanvas, text='Precise Alignment Hack\n(doubles processing time)').grid(row=5,column=0,padx=5,sticky=E)
precisealignmentbool = StringVar(advancedsettingsCanvas)
precisealignmentcheck = Checkbutton(advancedsettingsCanvas, variable=precisealignmentbool)
#precisealignmentcheck.config(width=15)
precisealignmentcheck.grid(row=5,column=1,pady=15)
advancedsettings(0)
advancedsettingswindow.withdraw()
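# Module-level defaults for the advanced settings. The checkbox settings are kept as
# '0'/'1' strings because they back the Tkinter StringVars created above.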
numberofthreads = 20
savefiletype='JPEG'
jpegquality=100
precisealignmenthack = '0'
oneeightysetting = '0'
stereodepthsetting = '0'
RightEye = False
progresswindow = Tk()
progresswindow.title('Progress')
if (os.name == 'nt'):
progresswindow.geometry('520x110+720+660')
else:
progresswindow.geometry('520x100+720+660')
progressText = Label(progresswindow,justify=CENTER, text='File (5/10) -- 50%')
progressText.grid(row=0,column=0,padx=5,pady=5)
progressBar = ttk.Progressbar(progresswindow, orient='horizontal', length=500, mode='determinate')
progressBar.grid(row=1,column=0,padx=10, pady=10)
cancelButton = Button(progresswindow, text='Cancel Batch Export', width=40, command=cancelsave)
cancelButton.grid(row=2,column=0,padx=10, pady=5)
progresswindow.withdraw()
sliderCanvas = Canvas(settings)
sliderCanvas.grid(row=0, column=0, padx=5, pady=15, columnspan=2)
Label(sliderCanvas,justify=LEFT, text='Resolution').grid(row=0,column=0,padx=5,sticky=E)
w0 = Scale(sliderCanvas, from_=1, to=20, orient=HORIZONTAL, length=350, showvalue=0, command=autoupdate)
w0.grid(row=0,column=1,padx=5,pady=7,sticky=W)
w0.set(4)
Label(sliderCanvas,justify=LEFT, text='Block Size').grid(row=1,column=0,padx=5,sticky=E)
w1 = Scale(sliderCanvas, from_=0, to=25, orient=HORIZONTAL, length=350, showvalue=0, command=autoupdate)
w1.grid(row=1,column=1,padx=5,pady=7,sticky=W)
w1.set(5)
Label(sliderCanvas,justify=LEFT, text='Window Size').grid(row=2,column=0,padx=5,sticky=E)
w2 = Scale(sliderCanvas, from_=0, to=15, orient=HORIZONTAL, length=350, showvalue=0, command=autoupdate)
w2.grid(row=2,column=1,padx=5,pady=7,sticky=W)
w2.set(5)
Label(sliderCanvas,justify=LEFT, text='Filter Cap').grid(row=3,column=0,padx=5,sticky=E)
w3 = Scale(sliderCanvas, from_=0, to=100, orient=HORIZONTAL, length=350, showvalue=0, command=autoupdate)
w3.grid(row=3,column=1,padx=5,pady=7,sticky=W)
w3.set(60)
Label(sliderCanvas,justify=LEFT, text='Lmbda').grid(row=4,column=0,padx=5,sticky=E)
w4 = Scale(sliderCanvas, from_=0, to=100, orient=HORIZONTAL, length=350, showvalue=0, command=autoupdate)
w4.grid(row=4,column=1,padx=5,pady=7,sticky=W)
w4.set(80)
Label(sliderCanvas,justify=LEFT, text='Brightness').grid(row=5,column=0,padx=5,sticky=E)
w5 = Scale(sliderCanvas, from_=0, to=200, orient=HORIZONTAL, length=350, showvalue=0, command=autoupdate)
w5.grid(row=5,column=1,padx=5,pady=7,sticky=W)
w5.set(100)
Label(sliderCanvas,justify=LEFT, text='Contrast').grid(row=6,column=0,padx=5,sticky=E)
w6 = Scale(sliderCanvas, from_=0, to=30, orient=HORIZONTAL, length=350, showvalue=0, command=autoupdate)
w6.grid(row=6,column=1,padx=5,pady=7,sticky=W)
w6.set(10)
Label(sliderCanvas,justify=LEFT, text='Horizontal Offset').grid(row=7,column=0,padx=5,sticky=E)
w7 = Scale(sliderCanvas, from_=0, to=100, orient=HORIZONTAL, length=350, showvalue=0, command=autoupdate)
w7.grid(row=7,column=1,padx=5,pady=7,sticky=W)
w7.set(50)
settings.update()
buttonCanvas = Canvas(settings)
buttonCanvas.grid(row=1, column=0, padx=10, columnspan=2)
updateCanvas = Canvas(buttonCanvas)
updateCanvas.grid(row=0, column=0, padx=0, pady=7, columnspan=2)
updateButton = Button(updateCanvas, text='Update', width=40, command=lambda:updateValue(0))
updateButton.grid(row=0,column=0, columnspan=2)
updateButton.configure(background='white')
checkbox = Checkbutton(updateCanvas, text='Auto-Update', variable=autoupdatebool)
checkbox.grid(row=1,column=0,columnspan=2)
openButton = Button(buttonCanvas, text='Open File', width=25, command=openfile)
openButton.grid(row=1,column=0,columnspan=2,pady=10,padx=20,sticky=W)
openButton.configure(background='white')
openbatchButton = Button(buttonCanvas, text='Open Directory', width=25, command=openfolder)
openbatchButton.grid(row=1,column=1,columnspan=2,pady=10,padx=20,sticky=E)
openbatchButton.configure(background='white')
saveButton = Button(buttonCanvas, text='Export Single Depth Map', width=25, command=lambda:SaveFile(1, 0))
saveButton.grid(row=2,column=0,pady=10,padx=20,sticky=E)
saveButton.configure(background='white')
savebatchButton = Button(buttonCanvas, text='Batch Export Depth Maps', width=25, command=lambda:SaveFile(1, 1))
savebatchButton.grid(row=2,column=1,pady=10,padx=20,sticky=E)
savebatchButton.configure(background='white')
saveButton = Button(buttonCanvas, text='Export Single 6DoF', width=25, command=lambda:SaveFile(2, 0))
saveButton.grid(row=3,column=0,pady=10,padx=20,sticky=W)
saveButton.configure(background='white')
savebatchButton = Button(buttonCanvas, text='Batch Export 6DoF', width=25, command=lambda:SaveFile(2, 1))
savebatchButton.grid(row=3,column=1,pady=10,padx=20,sticky=E)
savebatchButton.configure(background='white')
advancedsettingsButton = Button(buttonCanvas, text='Advanced Settings', width=40, command=showadvancedsettings)
advancedsettingsButton.grid(row=4,column=0,columnspan=2,pady=10,padx=20)
advancedsettingsButton.configure(background='white')
defaultsButton = Button(buttonCanvas, text='Reset Defaults', width=40, command=defaultSettings)
defaultsButton.grid(row=5,column=0,columnspan=2,pady=10,padx=20)
defaultsButton.configure(background='white')
loadsettingsButton = Button(buttonCanvas, text='Load Settings', width=16, command=loadSettings)
loadsettingsButton.grid(row=6,column=0,columnspan=1,pady=10,padx=20,sticky=E)
loadsettingsButton.configure(background='white')
savesettingsButton = Button(buttonCanvas, text='Save Settings', width=16, command=saveSettings)
savesettingsButton.grid(row=6,column=1,columnspan=1,pady=10,padx=20,sticky=W)
savesettingsButton.configure(background='white')
# showadvancedsettings(0)
# advancedsettingswindow.withdraw()
#VersionCheck()
mainloop()
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import absolute_import, unicode_literals
import datetime
from decimal import Decimal
import threading
import unittest
from django.conf import settings
from django.core.management.color import no_style
from django.db import (connection, connections, DEFAULT_DB_ALIAS,
DatabaseError, IntegrityError, transaction)
from django.db.backends.signals import connection_created
from django.db.backends.sqlite3.base import DatabaseOperations
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.backends.util import format_number
from django.db.models import Sum, Avg, Variance, StdDev
from django.db.models.fields import (AutoField, DateField, DateTimeField,
DecimalField, IntegerField, TimeField)
from django.db.utils import ConnectionHandler
from django.test import (TestCase, skipUnlessDBFeature, skipIfDBFeature,
TransactionTestCase)
from django.test.utils import override_settings, str_prefix
from django.utils import six
from django.utils.six.moves import xrange
from . import models
class DummyBackendTest(TestCase):
def test_no_databases(self):
"""
        Test that an empty DATABASES setting defaults to the dummy backend.
"""
DATABASES = {}
conns = ConnectionHandler(DATABASES)
self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'],
'django.db.backends.dummy')
class OracleChecks(unittest.TestCase):
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle quote_name semantics")
def test_quote_name(self):
# Check that '%' chars are escaped for query execution.
name = '"SOME%NAME"'
quoted_name = connection.ops.quote_name(name)
self.assertEqual(quoted_name % (), name)
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
from django.db.backends.oracle.base import convert_unicode
cursor = connection.cursor()
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!')])
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
from django.db.backends.oracle.base import Database
cursor = connection.cursor()
var = cursor.var(Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_long_string(self):
# If the backend is Oracle, test that we can save a text longer
# than 4000 chars and read it properly
c = connection.cursor()
c.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join([six.text_type(x) for x in xrange(4000)])
c.execute('INSERT INTO ltext VALUES (%s)', [long_str])
c.execute('SELECT text FROM ltext')
row = c.fetchone()
self.assertEqual(long_str, row[0].read())
c.execute('DROP TABLE ltext')
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle connection semantics")
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.cursor() # Ensure the connection is initialized.
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle connection semantics")
def test_order_of_nls_parameters(self):
# an 'almost right' datetime should work with configured
# NLS parameters as per #18465.
c = connection.cursor()
query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
# Test that the query succeeds without errors - pre #18465 this
# wasn't the case.
c.execute(query)
self.assertEqual(c.fetchone()[0], 1)
class MySQLTests(TestCase):
@unittest.skipUnless(connection.vendor == 'mysql',
"Test valid only for MySQL")
def test_autoincrement(self):
"""
Check that auto_increment fields are reset correctly by sql_flush().
Before MySQL version 5.0.13 TRUNCATE did not do auto_increment reset.
Refs #16961.
"""
statements = connection.ops.sql_flush(no_style(),
tables=['test'],
sequences=[{
'table': 'test',
'col': 'somecol',
}])
found_reset = False
for sql in statements:
found_reset = found_reset or 'ALTER TABLE' in sql
if connection.mysql_version < (5, 0, 13):
self.assertTrue(found_reset)
else:
self.assertFalse(found_reset)
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
        Test the custom ``django_date_trunc`` method, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
        Test the custom ``django_date_extract`` method, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_last_executed_query(self):
"""
last_executed_query should not raise an exception even if no previous
query has been run.
"""
cursor = connection.cursor()
try:
connection.ops.last_executed_query(cursor, '', ())
except Exception:
self.fail("'last_executed_query' should not raise an exception.")
def test_debug_sql(self):
list(models.Reporter.objects.filter(first_name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertIn("select", sql)
self.assertIn(models.Reporter._meta.db_table, sql)
def test_query_encoding(self):
"""
        Test that last_executed_query() returns a Unicode string
"""
persons = models.Reporter.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
sql, params = persons.query.sql_with_params()
cursor = persons.query.get_compiler('default').execute_sql(None)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertIsInstance(last_sql, six.text_type)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_no_interpolation_on_sqlite(self):
# Regression for #17158
# This shouldn't raise an exception
query = "SELECT strftime('%Y', 'now');"
connection.cursor().execute(query)
self.assertEqual(connection.queries[-1]['sql'],
str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1, 2, 3)])
self.assertRaises(Exception, cursor.executemany, query, [(1,)])
# Unfortunately, the following test would be a good one to run on all
# backends, but it breaks MySQL hard. Until #13711 is fixed, it can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
check it is. Refs #8901.
"""
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_m2m(self):
"""Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_flush(self):
"""Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
        # A full flush is too expensive for this test, so we dig into the
# internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertTrue(obj.pk > 10)
class PostgresVersionTest(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 8.3 beta4", 80300)
self.assert_parses("PostgreSQL 8.3", 80300)
self.assert_parses("EnterpriseDB 8.3", 80300)
self.assert_parses("PostgreSQL 8.3.6", 80306)
self.assert_parses("PostgreSQL 8.4beta1", 80400)
self.assert_parses("PostgreSQL 8.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 80301)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 8.3"]
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 80300)
class PostgresNewConnectionTest(TestCase):
"""
#17062: PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back.
"""
@unittest.skipUnless(
connection.vendor == 'postgresql',
"This test applies only to PostgreSQL")
def test_connect_and_rollback(self):
new_connections = ConnectionHandler(settings.DATABASES)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
new_connection.settings_dict['TIME_ZONE'] = new_tz
new_connection.enter_transaction_management()
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
try:
new_connection.close()
except DatabaseError:
pass
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
available_apps = []
# Unfortunately with sqlite3 the in-memory test database cannot be closed,
# and so it cannot be re-opened during testing.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
connection.cursor()
self.assertTrue(data["connection"].connection is connection.connection)
connection_created.disconnect(receiver)
data.clear()
connection.cursor()
self.assertTrue(data == {})
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
# For Oracle, when you want to select a value, you need to specify the
# special pseudo-table 'dual'; a select with no from clause is invalid.
bare_select_suffix = " FROM DUAL" if connection.vendor == 'oracle' else ""
def test_paramless_no_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%s'" + self.bare_select_suffix)
self.assertEqual(cursor.fetchall()[0][0], '%s')
def test_parameter_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is a sqlite-specific issue")
def test_sqlite_parameter_escaping(self):
#13648: '%s' escaping support for sqlite3
cursor = connection.cursor()
cursor.execute("select strftime('%s', date('now'))")
response = cursor.fetchall()[0][0]
        # response should be a non-zero integer
self.assertTrue(int(response))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
class SqliteAggregationTests(TestCase):
"""
#19360: Raise NotImplementedError when aggregating on date/time fields.
"""
@unittest.skipUnless(connection.vendor == 'sqlite',
"No need to check SQLite aggregation semantics")
def test_aggregation(self):
for aggregate in (Sum, Avg, Variance, StdDev):
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('time'))
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('date'))
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('last_modified'))
class SqliteChecks(TestCase):
@unittest.skipUnless(connection.vendor == 'sqlite',
"No need to do SQLite checks")
def test_convert_values_to_handle_null_value(self):
database_operations = DatabaseOperations(connection)
self.assertEqual(
None,
database_operations.convert_values(None, AutoField(primary_key=True))
)
self.assertEqual(
None,
database_operations.convert_values(None, DateField())
)
self.assertEqual(
None,
database_operations.convert_values(None, DateTimeField())
)
self.assertEqual(
None,
database_operations.convert_values(None, DecimalField())
)
self.assertEqual(
None,
database_operations.convert_values(None, IntegerField())
)
self.assertEqual(
None,
database_operations.convert_values(None, TimeField())
)
class BackendTestCase(TestCase):
def create_squares_with_executemany(self, args):
self.create_squares(args, 'format', True)
def create_squares(self, args, paramstyle, multiple):
cursor = connection.cursor()
opts = models.Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
if paramstyle=='format':
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
elif paramstyle=='pyformat':
query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
else:
raise ValueError("unsupported paramstyle in test")
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
#4896: Test cursor.executemany
args = [(i, i**2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i**2)
def test_cursor_executemany_with_empty_params_list(self):
#4765: executemany with params=[] does nothing
args = []
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
#10320: executemany accepts iterators
args = iter((i, i**2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 5)
args = iter((i, i**2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 9)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_execute_with_pyformat(self):
        #10070: Support pyformat style passing of parameters
args = {'root': 3, 'square': 9}
self.create_squares(args, 'pyformat', multiple=False)
self.assertEqual(models.Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
        #10070: Support pyformat style passing of parameters
args = [{'root': i, 'square': i**2} for i in range(-5, 6)]
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i**2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
args = iter({'root': i, 'square': i**2} for i in range(-3, 2))
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 5)
args = iter({'root': i, 'square': i**2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
#6254: fetchone, fetchmany, fetchall return strings as unicode objects
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = "françois"
try:
connection.cursor()
except DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with unicode password: %s" % e)
finally:
connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.supports_stddev, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Test that creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# Also, ideally we would verify that the raised exception is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
available_apps = ['backends']
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a.save()
except IntegrityError:
return
self.skipTest("This backend does not support integrity checks.")
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
        # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
a.save()
except IntegrityError:
return
self.skipTest("This backend does not support integrity checks.")
def test_disable_constraint_checks_manually(self):
"""
        When constraint checks are disabled, we should be able to write bad data without IntegrityErrors.
"""
transaction.set_autocommit(False)
try:
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
finally:
transaction.rollback()
finally:
transaction.set_autocommit(True)
def test_disable_constraint_checks_context_manager(self):
"""
        When constraint checks are disabled (using the context manager), we should be able to write bad data without IntegrityErrors.
"""
transaction.set_autocommit(False)
try:
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
finally:
transaction.rollback()
finally:
transaction.set_autocommit(True)
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
try:
transaction.set_autocommit(False)
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
finally:
transaction.rollback()
finally:
transaction.set_autocommit(True)
class ThreadTests(TestCase):
def test_default_connection_thread_local(self):
"""
Ensure that the default connection (i.e. django.db.connection) is
different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
connection.cursor()
connections_dict[id(connection)] = connection
def runner():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.allow_thread_sharing = True
connection.cursor()
connections_dict[id(connection)] = connection
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
        # Check that each created connection got a different inner connection.
self.assertEqual(
len(set(conn.connection for conn in connections_dict.values())),
3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_connections_thread_local(self):
"""
Ensure that the connections are different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
for conn in connections.all():
connections_dict[id(conn)] = conn
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_dict[id(conn)] = conn
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_dict), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
Ensure that a connection can be passed from one thread to the other.
Refs #17258.
"""
models.Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
models.Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
def test_closing_non_shared_connections(self):
"""
Ensure that a connection that is not explicitly shareable cannot be
closed by another thread.
Refs #17258.
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
"""
    Zero as the id for an AutoField should raise an exception in MySQL, because
    MySQL does not allow zero as an automatic primary key.
"""
@skipIfDBFeature('allows_primary_key_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
models.Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TransactionTestCase):
available_apps = ['backends']
def test_can_reference_existant(self):
obj = models.Object.objects.create()
ref = models.ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = models.ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existant(self):
self.assertFalse(models.Object.objects.filter(id=12345).exists())
ref = models.ObjectReference.objects.create(obj_id=12345)
ref_new = models.ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(models.Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = models.Object.objects.create()
obj.related_objects.create()
self.assertEqual(models.Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = models.Object._meta.get_field_by_name("related_objects")[0].rel.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
class BackendUtilTests(TestCase):
def test_format_number(self):
"""
Test the format_number converter utility
"""
def equal(value, max_d, places, result):
self.assertEqual(format_number(Decimal(value), max_d, places), result)
equal('0', 12, 3,
'0.000')
equal('0', 12, 8,
'0.00000000')
equal('1', 12, 9,
'1.000000000')
equal('0.00000000', 12, 8,
'0.00000000')
equal('0.000000004', 12, 8,
'0.00000000')
equal('0.000000008', 12, 8,
'0.00000001')
equal('0.000000000000000000999', 10, 8,
'0.00000000')
equal('0.1234567890', 12, 10,
'0.1234567890')
equal('0.1234567890', 12, 9,
'0.123456789')
equal('0.1234567890', 12, 8,
'0.12345679')
equal('0.1234567890', 12, 5,
'0.12346')
equal('0.1234567890', 12, 3,
'0.123')
equal('0.1234567890', 12, 1,
'0.1')
equal('0.1234567890', 12, 0,
'0')
|
x.py
|
import argparse
import functools
import importlib.util
import logging
import signal
import sys
import os
import traceback
from multiprocessing import get_context
from typing import List, Text, Optional
import ruamel.yaml as yaml
from rasa.cli.utils import get_validated_path, print_warning, print_error
from rasa.cli.arguments import x as arguments
from rasa.constants import (
DEFAULT_ENDPOINTS_PATH,
DEFAULT_CREDENTIALS_PATH,
DEFAULT_DOMAIN_PATH,
DEFAULT_CONFIG_PATH,
DEFAULT_LOG_LEVEL_RASA_X,
)
import rasa.utils.io as io_utils
logger = logging.getLogger(__name__)
DEFAULT_RASA_X_HOST = "http://localhost:5002"
DEFAULT_TRACKER_DB = "tracker.db"
# noinspection PyProtectedMember
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
x_parser_args = {
"parents": parents,
"conflict_handler": "resolve",
"formatter_class": argparse.ArgumentDefaultsHelpFormatter,
}
if is_rasa_x_installed():
# we'll only show the help msg for the command if Rasa X is actually installed
x_parser_args["help"] = "Starts the Rasa X interface."
shell_parser = subparsers.add_parser("x", **x_parser_args)
shell_parser.set_defaults(func=rasa_x)
arguments.set_x_arguments(shell_parser)
def _rasa_service(
args: argparse.Namespace, endpoints: "AvailableEndpoints", rasa_x_url=None
):
"""Starts the Rasa application."""
from rasa.core.run import serve_application
# needs separate logging configuration as it is started in its own process
logging.basicConfig(level=args.loglevel)
io_utils.configure_colored_logging(args.loglevel)
logging.getLogger("apscheduler").setLevel(logging.WARNING)
credentials_path = _prepare_credentials_for_rasa_x(
args.credentials, rasa_x_url=rasa_x_url
)
serve_application(
endpoints=endpoints,
port=args.port,
credentials=credentials_path,
cors=args.cors,
auth_token=args.auth_token,
enable_api=True,
jwt_secret=args.jwt_secret,
jwt_method=args.jwt_method,
)
def _prepare_credentials_for_rasa_x(
credentials_path: Optional[Text], rasa_x_url=None
) -> Text:
credentials_path = get_validated_path(
credentials_path, "credentials", DEFAULT_CREDENTIALS_PATH, True
)
if credentials_path:
credentials = io_utils.read_yaml_file(credentials_path)
else:
credentials = {}
    # this makes sure Rasa X is properly configured no matter what
if rasa_x_url:
credentials["rasa"] = {"url": rasa_x_url}
dumped_credentials = yaml.dump(credentials, default_flow_style=False)
tmp_credentials = io_utils.create_temporary_file(dumped_credentials, "yml")
return tmp_credentials
def _overwrite_endpoints_for_local_x(endpoints, rasa_x_token, rasa_x_url):
from rasa.utils.endpoints import EndpointConfig
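    # Point the model endpoint at the local Rasa X server so the production-tagged
    # model is pulled from it (polling every 2 seconds).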
endpoints.model = EndpointConfig(
"{}/projects/default/models/tags/production".format(rasa_x_url),
token=rasa_x_token,
wait_time_between_pulls=2,
)
if not endpoints.tracker_store:
endpoints.tracker_store = EndpointConfig(type="sql", db=DEFAULT_TRACKER_DB)
def start_rasa_for_local_rasa_x(args: argparse.Namespace, rasa_x_token: Text):
"""Starts the Rasa X API with Rasa as a background process."""
from rasa.core.utils import AvailableEndpoints
args.endpoints = get_validated_path(
args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
)
endpoints = AvailableEndpoints.read_endpoints(args.endpoints)
rasa_x_url = "{}/api".format(DEFAULT_RASA_X_HOST)
_overwrite_endpoints_for_local_x(endpoints, rasa_x_token, rasa_x_url)
vars(args).update(
dict(
nlu_model=None,
cors="*",
auth_token=args.auth_token,
enable_api=True,
endpoints=endpoints,
)
)
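    # Use a 'spawn' context so the Rasa server runs in a fresh interpreter process
    # instead of a fork of the Rasa X process.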
ctx = get_context("spawn")
p = ctx.Process(target=_rasa_service, args=(args, endpoints, rasa_x_url))
p.daemon = True
p.start()
return p
def is_rasa_x_installed():
"""Check if Rasa X is installed."""
    # we could also do something like checking if `import rasax` works;
    # the issue with that is that it actually does import the package, and this
    # takes some time that we don't want to spend when booting the CLI
return importlib.util.find_spec("rasax") is not None
def generate_rasa_x_token(length=16):
"""Generate a hexadecimal secret token used to access the Rasa X API.
A new token is generated on every `rasa x` command.
"""
from secrets import token_hex
return token_hex(length)
def _configure_logging(args):
from rasa.core.utils import configure_file_logging
from rasa.utils.common import set_log_level
log_level = args.loglevel or DEFAULT_LOG_LEVEL_RASA_X
if isinstance(log_level, str):
log_level = logging.getLevelName(log_level)
logging.basicConfig(level=log_level)
io_utils.configure_colored_logging(args.loglevel)
set_log_level(log_level)
configure_file_logging(log_level, args.log_file)
logging.getLogger("werkzeug").setLevel(logging.WARNING)
logging.getLogger("engineio").setLevel(logging.WARNING)
logging.getLogger("pika").setLevel(logging.WARNING)
logging.getLogger("socketio").setLevel(logging.ERROR)
if not log_level == logging.DEBUG:
logging.getLogger().setLevel(logging.WARNING)
logging.getLogger("py.warnings").setLevel(logging.ERROR)
def is_rasa_project_setup(project_path: Text):
mandatory_files = [DEFAULT_CONFIG_PATH, DEFAULT_DOMAIN_PATH]
for f in mandatory_files:
if not os.path.exists(os.path.join(project_path, f)):
return False
return True
def rasa_x(args: argparse.Namespace):
from rasa.cli.utils import print_success, print_error, signal_handler
from rasa.core.utils import AvailableEndpoints
signal.signal(signal.SIGINT, signal_handler)
_configure_logging(args)
if args.production:
print_success("Starting Rasa X in production mode... 🚀")
args.endpoints = get_validated_path(
args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
)
endpoints = AvailableEndpoints.read_endpoints(args.endpoints)
_rasa_service(args, endpoints)
else:
if not is_rasa_x_installed():
print_error(
"Rasa X is not installed. The `rasa x` "
"command requires an installation of Rasa X."
)
sys.exit(1)
project_path = "."
if not is_rasa_project_setup(project_path):
print_error(
"This directory is not a valid Rasa project. Use 'rasa init' "
"to create a new Rasa project or switch to a valid Rasa project "
"directory."
)
sys.exit(1)
_validate_domain(os.path.join(project_path, DEFAULT_DOMAIN_PATH))
if args.data and not os.path.exists(args.data):
print_warning(
"The provided data path ('{}') does not exists. Rasa X will start "
"without any training data.".format(args.data)
)
# noinspection PyUnresolvedReferences
from rasax.community import local
local.check_license_and_metrics(args)
rasa_x_token = generate_rasa_x_token()
process = start_rasa_for_local_rasa_x(args, rasa_x_token=rasa_x_token)
try:
local.main(args, project_path, args.data, token=rasa_x_token)
except Exception:
print (traceback.format_exc())
print_error(
"Sorry, something went wrong (see error above). Make sure to start "
"Rasa X with valid data and valid domain and config files. Please, "
"also check any warnings that popped up.\nIf you need help fixing "
"the issue visit our forum: https://forum.rasa.com/."
)
finally:
process.terminate()
def _validate_domain(domain_path: Text):
from rasa.core.domain import Domain, InvalidDomain
try:
Domain.load(domain_path)
except InvalidDomain as e:
print_error("The provided domain file could not be loaded. Error: {}".format(e))
sys.exit(1)
|
train_pg.py
|
import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(
input_placeholder,
output_size,
scope,
n_layers=2,
size=64,
activation=tf.tanh,
output_activation=None
):
#========================================================================================#
# ----------SECTION 3----------
# Network building
#
# Your code should make a feedforward neural network (also called a multilayer perceptron)
# with 'n_layers' hidden layers of size 'size' units.
#
# The output layer should have size 'output_size' and activation 'output_activation'.
#
# Hint: use tf.layers.dense
#========================================================================================#
with tf.variable_scope(scope):
# YOUR_CODE_HERE
for i in range(n_layers):
if i == 0:
hidden = tf.layers.dense(input_placeholder, size, activation)
else:
hidden = tf.layers.dense(hidden, size, activation)
output = tf.layers.dense(hidden, output_size, output_activation)
return output
def pathlength(path):
return len(path["reward"])
#============================================================================================#
# Policy Gradient
#============================================================================================#
def train_PG(exp_name='',
env_name='CartPole-v0',
n_iter=100,
gamma=1.0,
min_timesteps_per_batch=1000,
max_path_length=None,
learning_rate=5e-3,
reward_to_go=True,
animate=True,
logdir=None,
normalize_advantages=True,
nn_baseline=False,
seed=0,
# network arguments
n_layers=1,
size=32
):
start = time.time()
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
locals_ = locals()
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
# Make the gym environment
env = gym.make(env_name)
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
#========================================================================================#
# Notes on notation:
#
# Symbolic variables have the prefix sy_, to distinguish them from the numerical values
# that are computed later in the function
#
# Prefixes and suffixes:
# ob - observation
# ac - action
# _no - this tensor should have shape (batch size /n/, observation dim)
# _na - this tensor should have shape (batch size /n/, action dim)
# _n - this tensor should have shape (batch size /n/)
#
# Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
# is None
#========================================================================================#
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# ----------SECTION 4----------
# Placeholders
#
# Need these for batch observations / actions / advantages in policy gradient loss function.
#========================================================================================#
sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
if discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
# Define a placeholder for advantages
sy_adv_n = TODO
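# A minimal way to fill in the TODO above (one valid choice among several): advantages
# are one scalar per timestep, so a rank-1 float placeholder is enough.
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)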
#========================================================================================#
# ----------SECTION 4----------
# Networks
#
# Make symbolic operations for
# 1. Policy network outputs which describe the policy distribution.
# a. For the discrete case, just logits for each action.
#
# b. For the continuous case, the mean / log std of a Gaussian distribution over
# actions.
#
# Hint: use the 'build_mlp' function you defined in utilities.
#
# Note: these ops should be functions of the placeholder 'sy_ob_no'
#
# 2. Producing samples stochastically from the policy distribution.
# a. For the discrete case, an op that takes in logits and produces actions.
#
# Should have shape [None]
#
# b. For the continuous case, use the reparameterization trick:
# The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
#
# mu + sigma * z, z ~ N(0, I)
#
# This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
#
# Should have shape [None, ac_dim]
#
# Note: these ops should be functions of the policy network output ops.
#
# 3. Computing the log probability of a set of actions that were actually taken,
# according to the policy.
#
# Note: these ops should be functions of the placeholder 'sy_ac_na', and the
# policy network output ops.
#
#========================================================================================#
if discrete:
# YOUR_CODE_HERE
sy_logits_na = build_mlp(sy_ob_no, ac_dim, "policy_net", n_layers=2,
                         size=64, activation=tf.tanh, output_activation=None)
sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, 1), axis=[1])  # shape [None]
# Log probability of the actions that were actually taken (sy_ac_na), per the note above.
ac_index = tf.one_hot(sy_ac_na, ac_dim)
prob = tf.nn.softmax(sy_logits_na)
sy_logprob_n = tf.reduce_sum(ac_index * tf.log(prob), axis=1)
else:
# YOUR_CODE_HERE
sy_mean = TODO
sy_logstd = TODO # logstd should just be a trainable variable, not a network output.
sy_sampled_ac = TODO
sy_logprob_n = TODO # Hint: Use the log probability under a multivariate gaussian.
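# A sketch of one possible implementation for the continuous case, assuming a diagonal
# Gaussian policy (the scope name "policy_net" and variable name "logstd" are arbitrary):
sy_mean = build_mlp(sy_ob_no, ac_dim, "policy_net", n_layers=n_layers, size=size)
sy_logstd = tf.get_variable("logstd", shape=[ac_dim], dtype=tf.float32)
sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean))
# Log probability of the taken actions under N(sy_mean, exp(sy_logstd)^2), summed over
# action dimensions; additive constants are dropped since they do not affect the gradient.
sy_z_na = (sy_ac_na - sy_mean) / tf.exp(sy_logstd)
sy_logprob_n = (-0.5 * tf.reduce_sum(tf.square(sy_z_na), axis=1)
                - tf.reduce_sum(sy_logstd))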
#========================================================================================#
# ----------SECTION 4----------
# Loss Function and Training Operation
#========================================================================================#
loss = TODO # Loss function that we'll differentiate to get the policy gradient.
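# One possible loss (a sketch): the negative surrogate objective whose gradient is the
# policy gradient estimator, i.e. minimize -mean(logprob * advantage).
loss = -tf.reduce_mean(sy_logprob_n * sy_adv_n)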
update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#========================================================================================#
# ----------SECTION 5----------
# Optional Baseline
#========================================================================================#
if nn_baseline:
baseline_prediction = tf.squeeze(build_mlp(
sy_ob_no,
1,
"nn_baseline",
n_layers=n_layers,
size=size))
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
# YOUR_CODE_HERE
baseline_update_op = TODO
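# A sketch of one way to set up the baseline fit; the placeholder name "baseline_target"
# and the variable sy_target_n are illustrative, not part of the original skeleton.
sy_target_n = tf.placeholder(shape=[None], name="baseline_target", dtype=tf.float32)
baseline_loss = tf.losses.mean_squared_error(labels=sy_target_n, predictions=baseline_prediction)
baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(baseline_loss)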
#========================================================================================#
# Tensorflow Engineering: Config, Session, Variable initialization
#========================================================================================#
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__() # equivalent to `with sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
ob = env.reset()
obs, acs, rewards = [], [], []
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and animate)
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.05)
obs.append(ob)
ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > max_path_length:
break
path = {"observation" : np.array(obs),
"reward" : np.array(rewards),
"action" : np.array(acs)}
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > min_timesteps_per_batch:
break
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
#====================================================================================#
# ----------SECTION 4----------
# Computing Q-values
#
# Your code should construct numpy arrays for Q-values which will be used to compute
# advantages (which will in turn be fed to the placeholder you defined above).
#
# Recall that the expression for the policy gradient PG is
#
# PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
#
# where
#
# tau=(s_0, a_0, ...) is a trajectory,
# Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
# and b_t is a baseline which may depend on s_t.
#
# You will write code for two cases, controlled by the flag 'reward_to_go':
#
# Case 1: trajectory-based PG
#
# (reward_to_go = False)
#
# Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
# entire trajectory (regardless of which time step the Q-value should be for).
#
# For this case, the policy gradient estimator is
#
# E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
#
# where
#
# Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
#
# Thus, you should compute
#
# Q_t = Ret(tau)
#
# Case 2: reward-to-go PG
#
# (reward_to_go = True)
#
# Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
# from time step t. Thus, you should compute
#
# Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
#
#
# Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
# like the 'ob_no' and 'ac_na' above.
#
#====================================================================================#
# YOUR_CODE_HERE
q_n = TODO
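# A sketch covering both cases described above; reward_to_go toggles between
# Case 1 (full-trajectory return) and Case 2 (discounted reward-to-go).
q_n = []
for path in paths:
    rewards = path["reward"]
    T = len(rewards)
    if reward_to_go:
        # Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
        q_path = np.zeros(T)
        running = 0.0
        for t in reversed(range(T)):
            running = rewards[t] + gamma * running
            q_path[t] = running
    else:
        # Q_t = Ret(tau) = sum_{t'=0}^T gamma^t' * r_{t'}, the same value for every t
        ret = sum((gamma ** t) * rewards[t] for t in range(T))
        q_path = np.full(T, ret)
    q_n.append(q_path)
q_n = np.concatenate(q_n)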
#====================================================================================#
# ----------SECTION 5----------
# Computing Baselines
#====================================================================================#
if nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current or previous batch of Q-values. (Goes with Hint
# #bl2 below.)
b_n = TODO
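# A sketch following Hint #bl1: rescale the raw network output (trained against
# normalized targets) back to the statistics of the current batch of Q-values.
b_n_raw = sess.run(baseline_prediction, feed_dict={sy_ob_no: ob_no})
b_n = b_n_raw * q_n.std() + q_n.mean()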
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
#====================================================================================#
# ----------SECTION 4----------
# Advantage Normalization
#====================================================================================#
if normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
# YOUR_CODE_HERE
pass
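# One way to do the normalization described above (a sketch):
adv_n = (adv_n - adv_n.mean()) / (adv_n.std() + 1e-8)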
#====================================================================================#
# ----------SECTION 5----------
# Optimizing Neural Network Baseline
#====================================================================================#
if nn_baseline:
# ----------SECTION 5----------
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 above.)
# YOUR_CODE_HERE
pass
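# A sketch following Hint #bl2, reusing the illustrative sy_target_n placeholder
# sketched in the baseline setup above: fit the baseline to normalized Q-values.
target_n = (q_n - q_n.mean()) / (q_n.std() + 1e-8)
sess.run(baseline_update_op, feed_dict={sy_ob_no: ob_no, sy_target_n: target_n})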
#====================================================================================#
# ----------SECTION 4----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# YOUR_CODE_HERE
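# A sketch of the update itself, recording the loss before and after as suggested
# (these values could be added to the logz output below if desired):
feed = {sy_ob_no: ob_no, sy_ac_na: ac_na, sy_adv_n: adv_n}
loss_before = sess.run(loss, feed_dict=feed)
sess.run(update_op, feed_dict=feed)
loss_after = sess.run(loss, feed_dict=feed)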
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=32)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# Awkward hacky process runs, because Tensorflow does not like
# repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
p.join()
if __name__ == "__main__":
main()
|
autoCharger.py
|
#!/usr/bin/env python
from io import BytesIO
from time import sleep
from picamera import PiCamera
from PIL import Image
import zbar
import getpass
import requests
from requests_ntlm import HttpNtlmAuth
import untangle
import rospy
from std_msgs.msg import Float32
from std_msgs.msg import Float32MultiArray
from std_msgs.msg import Int32
from std_srvs.srv import Trigger
from icharger_usb.srv import SetCurrent
from recordtype import recordtype
import signal
import sys
import threading
import xml.etree.ElementTree as ET
ChannelStruct = recordtype("ChannelStruct", "charger channel pack_voltage cell_voltages charge status dv num_cells")
channels = []
# Capture Ctrl-C (seems to be necessary to escape from ROS)
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
sys.exit(0)
# Capture channel status
def statusCallback(data, channel):
channels[channel].status = data.data;
# Capture pack voltage
def packVoltageCallback(data, channel):
channels[channel].pack_voltage = data.data;
# Capture cell voltages
def cellVoltagesCallback(data, channel):
channels[channel].cell_voltages = data.data;
minCell = float('inf');
maxCell = float('-inf');
channels[channel].num_cells = 0
# Count channels, sanity check voltages and capture min/max
for cell in channels[channel].cell_voltages:
if cell > 2.5:
channels[channel].num_cells+=1
if cell < 1.0:
break
if cell > maxCell:
maxCell = cell
if cell < minCell:
minCell = cell
# Calculate dV
if channels[channel].num_cells > 0:
channels[channel].dv = maxCell - minCell
# Capture channel charge
def chargeCallback(data, channel):
channels[channel].charge = data.data;
# Once charging has been initiated, this function is called in a thread to monitor the charging status
def monitorChannel(sharepointSession, logEtag, logID, ch, spID):
# Wait a while for charge to initialise
sleep(10)
comments = ""
# Loop forever unless charge completes or is interrupted
# TODO: Figure out if charger has any error status codes and check for these too
while (True):
# r = sharepointSession.post('https://teams.ljmu.ac.uk/7/Drones/Operations/_api/contextinfo')
# rootXML = ET.fromstring(r.text)
# digest = rootXML[1].text
# cells = ','.join(str(i) for i in channels[ch].cell_voltages)
# payload = "{{ '__metadata': {{ 'type': 'SP.Data.Battery_x0020_charge_x0020_dataListItem'}},'Battery': {battery},'Voltage': {voltage}, 'Charge': {charge}, 'Cells','{cells}'}}".format(battery=spID,voltage=channels[ch].pack_voltage,charge=float(channels[ch].charge)/1000.0,cells=cells)
# r = sharepointSession.post("https://teams.ljmu.ac.uk/7/Drones/Operations/_api/Web/Lists/GetByTitle('Battery charge data')/items", timeout=10, data=payload, headers={"X-RequestDigest":digest,"content-type": "application/json;odata=verbose"})
# print(r.text)
if channels[ch].status == 40:
print("Charger " + str(channels[ch].charger) + " channel " + str(channels[ch].channel) + " is done")
break
if channels[ch].status == 0 or channels[ch].status == 1:
print("Charger " + str(channels[ch].charger) + " channel " + str(channels[ch].channel) + " interrupted")
comments = "Incomplete, interrupted"
break
sleep(1)
# Update sharepoint record with post-charge data
r = sharepointSession.post('https://teams.ljmu.ac.uk/7/Drones/Operations/_api/contextinfo')
rootXML = ET.fromstring(r.text)
digest = rootXML[1].text
payload = "{{ '__metadata': {{ 'type': 'SP.Data.Battery_x0020_logsListItem'}},'Post_x002d_V': {postv},'Post_x002d_dV': {postdv}, 'Charge_x0020__x0028_Ah_x0029_': {charge}, 'Comments': '{comments}'}}".format(postv=channels[ch].pack_voltage,postdv=channels[ch].dv,charge=float(channels[ch].charge)/1000.0,comments=comments)
r = sharepointSession.patch("https://teams.ljmu.ac.uk/7/Drones/Operations/_api/Web/Lists/GetByTitle('Battery logs')/getItemById('{logID}')".format(logID=logID), timeout=30, data=payload, headers={"X-RequestDigest":digest,"content-type": "application/json;odata=verbose", "X-Http-Method": "PATCH", "If-Match": logEtag})
# Main function
def autoCharger():
# Initialise some things
signal.signal(signal.SIGINT, signal_handler)
camera = PiCamera(resolution = (480,320), framerate=30)
rospy.init_node('autoCharger', anonymous=True)
topics = sorted(rospy.get_published_topics())
# Identify chargers available and subscribe to their topics
# NOTE: This only runs once, because running repeatedly could break things
# All charger nodes need to be running before we start this node
# TODO: Handle hotplugging of chargers gracefully
channel = 0
for topic in topics:
if 'status' in topic[0]:
topicFields = topic[0].split('/')
chargerNumber = topicFields[1].split('_')[1]
channelNumber = topicFields[2].split('_')[1]
channels.append(ChannelStruct(chargerNumber, channelNumber, 0.0, [], 0, 0, 0.0, 0))
rospy.Subscriber(topic[0], Int32, callback=statusCallback, callback_args = channel)
rospy.Subscriber(topic[0].replace('status','pack_voltage'), Float32, callback=packVoltageCallback, callback_args = channel)
rospy.Subscriber(topic[0].replace('status','cell_voltages'), Float32MultiArray, callback=cellVoltagesCallback, callback_args = channel)
rospy.Subscriber(topic[0].replace('status','charge'), Int32, callback=chargeCallback, callback_args = channel)
channel+=1
print('Found ' + str(channel) + ' charger channels')
# Loop forever
while True:
# Start a session for data logging
sharepointSession = requests.Session()
# Loop forever until a successful login is made
while True:
username = raw_input("Username: ")
if username == "":
continue
password = getpass.getpass("Password: ")
# Authenticate session then delete password immediately
sharepointSession.auth = HttpNtlmAuth('USERS\\' + username,password)
del(password)
# Poll the battery table and make sure we're an authorised user
r = sharepointSession.get("https://teams.ljmu.ac.uk/7/Drones/Operations/_api/Web/Lists/GetByTitle('Battery register')", timeout=30)
if r.status_code == 200:
print("Login successful")
break;
else: # Otherwise try again
print("Unable to log in, code " + str(r.status_code))
# Prompt user to scan barcode
print("Please scan barcode")
sleep(3)
# Display camera view
camera.start_preview()
# create a reader
scanner = zbar.ImageScanner()
# configure the reader
scanner.parse_config('enable')
while True:
# Capture frame from camera
stream = BytesIO()
camera.capture(stream, format='jpeg', use_video_port=True)
# "Rewind" the stream to the beginning so we can read its content
stream.seek(0)
# Pass image to zbar
pil = Image.open(stream).convert('L')
width, height = pil.size
raw = pil.tobytes()
image = zbar.Image(width, height, 'Y800', raw)
scanner.scan(image)
# Extract results
batteryID = ""
for symbol in image:
batteryID = symbol.data
print("Detected battery ID: " + batteryID)
break
if batteryID != "":
break
sleep(0.01)
# Tidy up scanning resources
del(image)
del(stream)
camera.stop_preview()
# Look up battery data on sharepoint
print("Looking up data")
try:
r = sharepointSession.get("https://teams.ljmu.ac.uk/7/Drones/Operations/_api/Web/Lists/GetByTitle('Battery register')/items?$select=ID,Title,Brand,Model,Cells,Capacity_x0020__x0028_Ah_x0029_&$filter=Title eq '" + symbol.data + "'", timeout=30)
except requests.Timeout, e:
print("Error looking up data, please try again")
continue
batteryData = untangle.parse(r.text)
spID = batteryData.feed.entry.content.m_properties.d_ID.cdata;
brand = batteryData.feed.entry.content.m_properties.d_Brand.cdata;
model = batteryData.feed.entry.content.m_properties.d_Model.cdata;
cells = batteryData.feed.entry.content.m_properties.d_Cells.cdata;
capacity = batteryData.feed.entry.content.m_properties.d_Capacity_x0020__x0028_Ah_x0029_.cdata;
print(batteryID + ": " + brand + " " + model + " " + cells + "S " + capacity + "Ah")
query = raw_input("Is this correct (y/N)?: ")
# If information is correct, proceed
if query.upper() == "Y":
# Find an empty charging channel
chIdx = 0;
chargerNum = -1;
channelNum = -1;
for channel in channels:
if (channel.status == 0 or channel.status == 1 or channel.status == 40) and sum(channel.cell_voltages) < 1.0:
chargerNum = channel.charger
channelNum = channel.channel
break
chIdx += 1
if chargerNum == -1 and channelNum == -1:
print("No charger channels available, try agian later...")
continue
print("Please connect to charger " + str(chargerNum) + " channel " + str(channelNum))
# Wait until battery is connected (check both pack voltage and number of cells)
while abs(sum(channels[chIdx].cell_voltages) - channels[chIdx].pack_voltage) > 0.2 or channels[chIdx].num_cells != float(cells):
sleep(1)
# Charge or storage
option = raw_input("(C)harge or (S)torage?: ")
# Sanity check pack health
#if channels[chIdx].dv > 0.05:
# print("Pack has a dV of >50mV, do not charge and inform DFO as soon as possible")
# continue
if channels[chIdx].pack_voltage < 3*float(cells) or channels[chIdx].pack_voltage > 4.2*float(cells):
print("Pack voltage is out of range, do not charge and inform DFO as soon as possible")
continue
# Take the relevant actions
action = ""
if option.upper() == "C":
action = "Charge"
elif option.upper() == "S":
action = "Storage"
else:
print("Unrecognised option, try again...")
continue
# Create new sharepoint record for this activity
r = sharepointSession.post('https://teams.ljmu.ac.uk/7/Drones/Operations/_api/contextinfo')
rootXML = ET.fromstring(r.text)
digest = rootXML[1].text
payload = "{{ '__metadata': {{ 'type': 'SP.Data.Battery_x0020_logsListItem'}},'BatteryId': {battery},'Action': '{action}','Pre_x002d_V': {prev},'Pre_x002d_dV': {predv}}}".format(battery=spID,action=action,prev=channels[chIdx].pack_voltage,predv=channels[chIdx].dv)
while True:
try:
r = sharepointSession.post("https://teams.ljmu.ac.uk/7/Drones/Operations/_api/Web/Lists/GetByTitle('Battery logs')/items", timeout=10, data=payload, headers={"X-RequestDigest":digest,"content-type": "application/json;odata=verbose"})
break;
except:
sleep(1)
# Returned record contains log ID and ETag, both necessary for updating record
# Extract these and pass to monitoring thread
if option.upper() == "C":
setCurrent = rospy.ServiceProxy("/charger_" + str(chargerNum) +"/charge_current", SetCurrent)
setCurrent(float(capacity))
startCharge = rospy.ServiceProxy("/charger_" + str(chargerNum) + "/channel_" + str(channelNum) + "/start_charge", Trigger)
startCharge()
print("Charge started")
elif option.upper() == "S":
setCurrent = rospy.ServiceProxy("/charger_" + str(chargerNum) +"/charge_current", SetCurrent)
setCurrent(float(capacity))
setCurrent = rospy.ServiceProxy("/charger_" + str(chargerNum) +"/discharge_current", SetCurrent)
setCurrent(float(capacity))
startStorage = rospy.ServiceProxy("/charger_" + str(chargerNum) + "/channel_" + str(channelNum) + "/start_storage", Trigger)
startStorage()
print("Storage started")
else:
print("Unrecognised option, try again...")
continue
batteryLogData = untangle.parse(r.text)
logEtag = batteryLogData.entry['m:etag']
logID = batteryLogData.entry.content.m_properties.d_ID.cdata;
t = threading.Thread(target=monitorChannel, args=(sharepointSession,logEtag,logID,chIdx,spID,))
t.daemon = True
t.start()
else:
print("Restarting...")
continue
# ROS main
if __name__ == '__main__':
try:
autoCharger()
except rospy.ROSInterruptException:
pass
|
tests.py
|
import threading
from datetime import datetime, timedelta
from unittest import mock
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections, models
from django.db.models.manager import BaseManager
from django.db.models.query import MAX_GET_RESULTS, EmptyQuerySet
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipUnlessDBFeature,
)
from django.utils.translation import gettext_lazy
from .models import (
Article, ArticleSelectOnSave, FeaturedArticle, PrimaryKeyWithDefault,
SelfRef,
)
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.all().count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.all().count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, 'Second article', datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, 'Second article')
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline='Third article',
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, 'Third article')
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Fourth article')
def test_cannot_create_instance_with_invalid_kwargs(self):
with self.assertRaisesMessage(TypeError, "Article() got an unexpected keyword argument 'foo'"):
Article(
id=None,
headline='Some headline',
pub_date=datetime(2005, 7, 31),
foo='bar',
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Article 5')
self.assertIsNotNone(a.id)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Default headline')
def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
"""as much precision in *seconds*"""
a1 = Article(
headline='Article 7',
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date, datetime(2005, 7, 31, 12, 30))
a2 = Article(
headline='Article 8',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date, datetime(2005, 7, 31, 12, 30, 45))
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline='original', pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = 'Updated headline'
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = [
'Parrot programs in Python', 'Second article', 'Third article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline='Some headline', pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
def test_save_primary_with_default(self):
# An UPDATE attempt is skipped when a primary key has default.
with self.assertNumQueries(1):
PrimaryKeyWithDefault().save()
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
with self.assertRaisesMessage(AttributeError, "Manager isn't accessible via Article instances"):
getattr(Article(), "objects",)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(
Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"]
)
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'), ["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
def test_microsecond_precision(self):
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date, datetime(2005, 7, 31, 12, 30, 45, 180))
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"]
)
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline, '\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.bulk_create([
Article(headline='Article 10', pub_date=datetime(2005, 7, 31, 12, 30, 45)),
Article(headline='Article 11', pub_date=datetime(2008, 1, 1)),
Article(headline='Article 12', pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999)),
])
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual(
[sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]]
)
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.bulk_create([
Article(headline='Article 10', pub_date=datetime(2005, 7, 31, 12, 30, 45)),
Article(headline='Article 11', pub_date=datetime(2008, 1, 1)),
Article(headline='Article 12', pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999)),
])
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_gettext_lazy(self):
"""
gettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = gettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objects
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# still test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
msg = "EmptyQuerySet can't be instantiated"
with self.assertRaisesMessage(TypeError, msg):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
self.assertNotIsInstance('', EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(models.QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
a = Article()
self.assertEqual(a, a)
self.assertEqual(a, mock.ANY)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
msg = 'Model instances without primary key value are unhashable'
with self.assertRaisesMessage(TypeError, msg):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
def test_missing_hash_not_inherited(self):
class NoHash(models.Model):
def __eq__(self, other):
return super.__eq__(other)
with self.assertRaisesMessage(TypeError, "unhashable type: 'NoHash'"):
hash(NoHash(id=1))
def test_specified_parent_hash_inherited(self):
class ParentHash(models.Model):
def __eq__(self, other):
return super.__eq__(other)
__hash__ = models.Model.__hash__
self.assertEqual(hash(ParentHash(id=1)), 1)
def test_delete_and_access_field(self):
# Accessing a field after it's deleted from a model reloads its value.
pub_date = datetime.now()
article = Article.objects.create(headline='foo', pub_date=pub_date)
new_pub_date = article.pub_date + timedelta(days=10)
article.headline = 'bar'
article.pub_date = new_pub_date
del article.headline
with self.assertNumQueries(1):
self.assertEqual(article.headline, 'foo')
# Fields that weren't deleted aren't reloaded.
self.assertEqual(article.pub_date, new_pub_date)
def test_multiple_objects_max_num_fetched(self):
max_results = MAX_GET_RESULTS - 1
Article.objects.bulk_create(
Article(headline='Area %s' % i, pub_date=datetime(2005, 7, 28))
for i in range(max_results)
)
self.assertRaisesMessage(
MultipleObjectsReturned,
'get() returned more than one Article -- it returned %d!' % max_results,
Article.objects.get,
headline__startswith='Area',
)
Article.objects.create(headline='Area %s' % max_results, pub_date=datetime(2005, 7, 28))
self.assertRaisesMessage(
MultipleObjectsReturned,
'get() returned more than one Article -- it returned more than %d!' % max_results,
Article.objects.get,
headline__startswith='Area',
)
class ModelLookupTest(TestCase):
@classmethod
def setUpTestData(cls):
# Create an Article.
cls.a = Article(
id=None,
headline='Swallow programs in Python',
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
cls.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = 'Parrot programs in Python'
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertQuerysetEqual(Article.objects.all(), ['<Article: Parrot programs in Python>'])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith='Swallow'), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline='Swallow programs in Python'), self.a)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=5),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."):
Article.objects.get(id__exact=2000,)
# To avoid dict-ordering related errors check only one lookup
# in single assert.
with self.assertRaises(ObjectDoesNotExist):
Article.objects.get(pub_date__year=2005, pub_date__month=8)
with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."):
Article.objects.get(pub_date__week_day=6,)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]), ["<Article: Swallow programs in Python>"])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline='Swallow bites Python',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
msg = "get() returned more than one Article -- it returned 2!"
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(headline__startswith='Swallow',)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005,)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005, pub_date__month=7)
class ConcurrentSaveTests(TransactionTestCase):
available_apps = ['basic']
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_concurrent_delete_with_save(self):
"""
Test fetching, deleting and finally saving an object - we should get
an insert in this case.
"""
a = Article.objects.create(headline='foo', pub_date=datetime.now())
exceptions = []
def deleter():
try:
# Do not delete a directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(SimpleTestCase):
QUERYSET_PROXY_METHODS = [
'none',
'count',
'dates',
'datetimes',
'distinct',
'extra',
'get',
'get_or_create',
'update_or_create',
'create',
'bulk_create',
'bulk_update',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'earliest',
'latest',
'first',
'last',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
'explain',
'_insert',
'_update',
'raw',
'union',
'intersection',
'difference',
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(models.QuerySet)),
sorted(self.QUERYSET_PROXY_METHODS),
)
class SelectOnSaveTests(TestCase):
def test_select_on_save(self):
a1 = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(2):
asos.save()
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
with self.assertRaisesMessage(DatabaseError, 'Forced update did not affect any rows.'):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
select_on_save works correctly if the database doesn't return correct
information about matched rows from UPDATE.
"""
# Change the manager to not return "row matched" for update().
# We are going to change the Article's _base_manager class
# dynamically. This is a bit of a hack, but it seems hard to
# test this properly otherwise. Article's manager, because
# proxy models use their parent model's _base_manager.
orig_class = Article._base_manager._queryset_class
class FakeQuerySet(models.QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super()._update(*args, **kwargs)
return 0
try:
Article._base_manager._queryset_class = FakeQuerySet
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaisesMessage(DatabaseError, 'Forced update did not affect any rows.'):
asos.save(force_update=True)
msg = (
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block."
)
with self.assertRaisesMessage(DatabaseError, msg):
asos.save(update_fields=['pub_date'])
finally:
Article._base_manager._queryset_class = orig_class
class ModelRefreshTests(TestCase):
def test_refresh(self):
a = Article.objects.create(pub_date=datetime.now())
Article.objects.create(pub_date=datetime.now())
Article.objects.filter(pk=a.pk).update(headline='new headline')
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, 'new headline')
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db(fields=['headline'])
self.assertEqual(a.headline, 'new headline 2')
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_unknown_kwarg(self):
s = SelfRef.objects.create()
msg = "refresh_from_db() got an unexpected keyword argument 'unknown_kwarg'"
with self.assertRaisesMessage(TypeError, msg):
s.refresh_from_db(unknown_kwarg=10)
def test_lookup_in_fields(self):
s = SelfRef.objects.create()
msg = 'Found "__" in fields argument. Relations and transforms are not allowed in fields.'
with self.assertRaisesMessage(ValueError, msg):
s.refresh_from_db(fields=['foo__bar'])
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
# The old related instance was thrown away (the selfref_id has
# changed). It needs to be reloaded on access, so one query
# executed.
self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_null_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create(selfref=s1)
s2.selfref = None
s2.refresh_from_db()
self.assertEqual(s2.selfref, s1)
def test_refresh_unsaved(self):
pub_date = datetime.now()
a = Article.objects.create(pub_date=pub_date)
a2 = Article(id=a.pk)
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_fk_on_delete_set_null(self):
a = Article.objects.create(
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
s1 = SelfRef.objects.create(article=a)
a.delete()
s1.refresh_from_db()
self.assertIsNone(s1.article_id)
self.assertIsNone(s1.article)
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
def test_refresh_clears_reverse_related(self):
"""refresh_from_db() clear cached reverse relations."""
article = Article.objects.create(
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertFalse(hasattr(article, 'featured'))
FeaturedArticle.objects.create(article_id=article.pk)
article.refresh_from_db()
self.assertTrue(hasattr(article, 'featured'))
def test_refresh_clears_one_to_one_field(self):
article = Article.objects.create(
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
featured = FeaturedArticle.objects.create(article_id=article.pk)
self.assertEqual(featured.article.headline, 'Parrot programs in Python')
article.headline = 'Parrot programs in Python 2.0'
article.save()
featured.refresh_from_db()
self.assertEqual(featured.article.headline, 'Parrot programs in Python 2.0')
def test_prefetched_cache_cleared(self):
a = Article.objects.create(pub_date=datetime(2005, 7, 28))
s = SelfRef.objects.create(article=a)
# refresh_from_db() without fields=[...]
a1_prefetched = Article.objects.prefetch_related('selfref_set').first()
self.assertCountEqual(a1_prefetched.selfref_set.all(), [s])
s.article = None
s.save()
# Relation is cleared and prefetch cache is stale.
self.assertCountEqual(a1_prefetched.selfref_set.all(), [s])
a1_prefetched.refresh_from_db()
# Cache was cleared and new results are available.
self.assertCountEqual(a1_prefetched.selfref_set.all(), [])
# refresh_from_db() with fields=[...]
a2_prefetched = Article.objects.prefetch_related('selfref_set').first()
self.assertCountEqual(a2_prefetched.selfref_set.all(), [])
s.article = a
s.save()
# Relation is added and prefetch cache is stale.
self.assertCountEqual(a2_prefetched.selfref_set.all(), [])
a2_prefetched.refresh_from_db(fields=['selfref_set'])
# Cache was cleared and new results are available.
self.assertCountEqual(a2_prefetched.selfref_set.all(), [s])
|
test_socket_manager.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import time
import uuid
from unittest import mock
from parlai.mturk.core.socket_manager import Packet, SocketManager
from parlai.mturk.core.shared_utils import AssignState
import parlai.utils.testing as testing_utils
import parlai.mturk.core.data_model as data_model
import parlai.mturk.core.shared_utils as shared_utils
import threading
from websocket_server import WebsocketServer
import json
TEST_WORKER_ID_1 = 'TEST_WORKER_ID_1'
TEST_ASSIGNMENT_ID_1 = 'TEST_ASSIGNMENT_ID_1'
TEST_HIT_ID_1 = 'TEST_HIT_ID_1'
TEST_WORKER_ID_2 = 'TEST_WORKER_ID_2'
TEST_ASSIGNMENT_ID_2 = 'TEST_ASSIGNMENT_ID_2'
TEST_HIT_ID_2 = 'TEST_HIT_ID_2'
TEST_CONV_ID_1 = 'TEST_CONV_ID_1'
FAKE_ID = 'BOGUS'
MESSAGE_ID_1 = 'MESSAGE_ID_1'
MESSAGE_ID_2 = 'MESSAGE_ID_2'
MESSAGE_ID_3 = 'MESSAGE_ID_3'
MESSAGE_ID_4 = 'MESSAGE_ID_4'
COMMAND_ID_1 = 'COMMAND_ID_1'
MESSAGE_TYPE = data_model.MESSAGE_TYPE_MESSAGE
COMMAND_TYPE = data_model.MESSAGE_TYPE_COMMAND
MESSAGE_1 = {'message_id': MESSAGE_ID_1, 'type': MESSAGE_TYPE}
MESSAGE_2 = {'message_id': MESSAGE_ID_2, 'type': MESSAGE_TYPE}
COMMAND_1 = {'message_id': COMMAND_ID_1, 'type': COMMAND_TYPE}
AGENT_ID = 'AGENT_ID'
ACT_1 = {'text': 'THIS IS A MESSAGE', 'id': AGENT_ID}
ACT_2 = {'text': 'THIS IS A MESSAGE AGAIN', 'id': AGENT_ID}
active_statuses = [
AssignState.STATUS_NONE,
AssignState.STATUS_ONBOARDING,
AssignState.STATUS_WAITING,
AssignState.STATUS_IN_TASK,
]
complete_statuses = [
AssignState.STATUS_DONE,
AssignState.STATUS_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT_EARLY,
AssignState.STATUS_EXPIRED,
AssignState.STATUS_RETURNED,
]
statuses = active_statuses + complete_statuses
TASK_GROUP_ID_1 = 'TASK_GROUP_ID_1'
SocketManager.DEF_MISSED_PONGS = 3
SocketManager.HEARTBEAT_RATE = 0.6
SocketManager.DEF_DEAD_TIME = 0.6
SocketManager.ACK_TIME = {Packet.TYPE_ALIVE: 0.4, Packet.TYPE_MESSAGE: 0.2}
shared_utils.THREAD_SHORT_SLEEP = 0.05
shared_utils.THREAD_MEDIUM_SLEEP = 0.15
TIMEOUT_VERIFICATION = 8.5
class TestPacket(unittest.TestCase):
"""
Various unit tests for the Packet class.
"""
ID = 'ID'
SENDER_ID = 'SENDER_ID'
RECEIVER_ID = 'RECEIVER_ID'
ASSIGNMENT_ID = 'ASSIGNMENT_ID'
DATA = 'DATA'
CONVERSATION_ID = 'CONVERSATION_ID'
REQUIRES_ACK = True
BLOCKING = False
ACK_FUNCTION = 'ACK_FUNCTION'
def setUp(self):
self.packet_1 = Packet(
self.ID,
Packet.TYPE_MESSAGE,
self.SENDER_ID,
self.RECEIVER_ID,
self.ASSIGNMENT_ID,
self.DATA,
conversation_id=self.CONVERSATION_ID,
requires_ack=self.REQUIRES_ACK,
blocking=self.BLOCKING,
ack_func=self.ACK_FUNCTION,
)
self.packet_2 = Packet(
self.ID,
Packet.TYPE_HEARTBEAT,
self.SENDER_ID,
self.RECEIVER_ID,
self.ASSIGNMENT_ID,
self.DATA,
)
self.packet_3 = Packet(
self.ID,
Packet.TYPE_ALIVE,
self.SENDER_ID,
self.RECEIVER_ID,
self.ASSIGNMENT_ID,
self.DATA,
)
def tearDown(self):
pass
def test_packet_init(self):
"""
Test proper initialization of packet fields.
"""
self.assertEqual(self.packet_1.id, self.ID)
self.assertEqual(self.packet_1.type, Packet.TYPE_MESSAGE)
self.assertEqual(self.packet_1.sender_id, self.SENDER_ID)
self.assertEqual(self.packet_1.receiver_id, self.RECEIVER_ID)
self.assertEqual(self.packet_1.assignment_id, self.ASSIGNMENT_ID)
self.assertEqual(self.packet_1.data, self.DATA)
self.assertEqual(self.packet_1.conversation_id, self.CONVERSATION_ID)
self.assertEqual(self.packet_1.requires_ack, self.REQUIRES_ACK)
self.assertEqual(self.packet_1.blocking, self.BLOCKING)
self.assertEqual(self.packet_1.ack_func, self.ACK_FUNCTION)
self.assertEqual(self.packet_1.status, Packet.STATUS_INIT)
self.assertEqual(self.packet_2.id, self.ID)
self.assertEqual(self.packet_2.type, Packet.TYPE_HEARTBEAT)
self.assertEqual(self.packet_2.sender_id, self.SENDER_ID)
self.assertEqual(self.packet_2.receiver_id, self.RECEIVER_ID)
self.assertEqual(self.packet_2.assignment_id, self.ASSIGNMENT_ID)
self.assertEqual(self.packet_2.data, self.DATA)
self.assertIsNone(self.packet_2.conversation_id)
self.assertFalse(self.packet_2.requires_ack)
self.assertFalse(self.packet_2.blocking)
self.assertIsNone(self.packet_2.ack_func)
self.assertEqual(self.packet_2.status, Packet.STATUS_INIT)
self.assertEqual(self.packet_3.id, self.ID)
self.assertEqual(self.packet_3.type, Packet.TYPE_ALIVE)
self.assertEqual(self.packet_3.sender_id, self.SENDER_ID)
self.assertEqual(self.packet_3.receiver_id, self.RECEIVER_ID)
self.assertEqual(self.packet_3.assignment_id, self.ASSIGNMENT_ID)
self.assertEqual(self.packet_3.data, self.DATA)
self.assertIsNone(self.packet_3.conversation_id)
self.assertTrue(self.packet_3.requires_ack)
self.assertTrue(self.packet_3.blocking)
self.assertIsNone(self.packet_3.ack_func)
self.assertEqual(self.packet_3.status, Packet.STATUS_INIT)
def test_dict_conversion(self):
"""
Ensure packets can be converted to and from a representative dict.
"""
converted_packet = Packet.from_dict(self.packet_1.as_dict())
self.assertEqual(self.packet_1.id, converted_packet.id)
self.assertEqual(self.packet_1.type, converted_packet.type)
self.assertEqual(self.packet_1.sender_id, converted_packet.sender_id)
self.assertEqual(self.packet_1.receiver_id, converted_packet.receiver_id)
self.assertEqual(self.packet_1.assignment_id, converted_packet.assignment_id)
self.assertEqual(self.packet_1.data, converted_packet.data)
self.assertEqual(
self.packet_1.conversation_id, converted_packet.conversation_id
)
packet_dict = self.packet_1.as_dict()
self.assertDictEqual(packet_dict, Packet.from_dict(packet_dict).as_dict())
def test_connection_ids(self):
"""
Ensure that connection ids are reported as we expect them.
"""
sender_conn_id = '{}_{}'.format(self.SENDER_ID, self.ASSIGNMENT_ID)
receiver_conn_id = '{}_{}'.format(self.RECEIVER_ID, self.ASSIGNMENT_ID)
self.assertEqual(self.packet_1.get_sender_connection_id(), sender_conn_id)
self.assertEqual(self.packet_1.get_receiver_connection_id(), receiver_conn_id)
def test_packet_conversions(self):
"""
Ensure that packet copies and acts are produced properly.
"""
# Copy important packet
message_packet_copy = self.packet_1.new_copy()
self.assertNotEqual(message_packet_copy.id, self.ID)
self.assertNotEqual(message_packet_copy, self.packet_1)
self.assertEqual(message_packet_copy.type, self.packet_1.type)
self.assertEqual(message_packet_copy.sender_id, self.packet_1.sender_id)
self.assertEqual(message_packet_copy.receiver_id, self.packet_1.receiver_id)
self.assertEqual(message_packet_copy.assignment_id, self.packet_1.assignment_id)
self.assertEqual(message_packet_copy.data, self.packet_1.data)
self.assertEqual(
message_packet_copy.conversation_id, self.packet_1.conversation_id
)
self.assertEqual(message_packet_copy.requires_ack, self.packet_1.requires_ack)
self.assertEqual(message_packet_copy.blocking, self.packet_1.blocking)
self.assertIsNone(message_packet_copy.ack_func)
self.assertEqual(message_packet_copy.status, Packet.STATUS_INIT)
# Copy non-important packet
hb_packet_copy = self.packet_2.new_copy()
self.assertNotEqual(hb_packet_copy.id, self.ID)
self.assertNotEqual(hb_packet_copy, self.packet_2)
self.assertEqual(hb_packet_copy.type, self.packet_2.type)
self.assertEqual(hb_packet_copy.sender_id, self.packet_2.sender_id)
self.assertEqual(hb_packet_copy.receiver_id, self.packet_2.receiver_id)
self.assertEqual(hb_packet_copy.assignment_id, self.packet_2.assignment_id)
self.assertEqual(hb_packet_copy.data, self.packet_2.data)
self.assertEqual(hb_packet_copy.conversation_id, self.packet_2.conversation_id)
self.assertEqual(hb_packet_copy.requires_ack, self.packet_2.requires_ack)
self.assertEqual(hb_packet_copy.blocking, self.packet_2.blocking)
self.assertIsNone(hb_packet_copy.ack_func)
self.assertEqual(hb_packet_copy.status, Packet.STATUS_INIT)
# ack important packet
ack_packet = self.packet_1.get_ack()
self.assertEqual(ack_packet.id, self.ID)
self.assertEqual(ack_packet.type, Packet.TYPE_ACK)
self.assertEqual(ack_packet.sender_id, self.RECEIVER_ID)
self.assertEqual(ack_packet.receiver_id, self.SENDER_ID)
self.assertEqual(ack_packet.assignment_id, self.ASSIGNMENT_ID)
self.assertEqual(ack_packet.data, '')
self.assertEqual(ack_packet.conversation_id, self.CONVERSATION_ID)
self.assertFalse(ack_packet.requires_ack)
self.assertFalse(ack_packet.blocking)
self.assertIsNone(ack_packet.ack_func)
self.assertEqual(ack_packet.status, Packet.STATUS_INIT)
def test_packet_modifications(self):
"""
Ensure that packet modification helpers update the fields and return the packet.
"""
# All operations return the packet
self.assertEqual(self.packet_1.swap_sender(), self.packet_1)
self.assertEqual(self.packet_1.set_type(Packet.TYPE_ACK), self.packet_1)
self.assertEqual(self.packet_1.set_data(None), self.packet_1)
# Ensure all of the operations worked
self.assertEqual(self.packet_1.sender_id, self.RECEIVER_ID)
self.assertEqual(self.packet_1.receiver_id, self.SENDER_ID)
self.assertEqual(self.packet_1.type, Packet.TYPE_ACK)
self.assertIsNone(self.packet_1.data)
class MockSocket:
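# Stands in for the server-side websocket endpoint: it answers WORLD_ALIVE and
# heartbeat packets and routes other packets to the per-worker handlers
# registered by MockAgent instances.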
def __init__(self):
self.last_messages = {}
self.connected = False
self.disconnected = False
self.closed = False
self.ws = None
self.should_heartbeat = True
self.fake_workers = []
self.port = None
self.launch_socket()
self.handlers = {}
while self.ws is None:
time.sleep(0.05)
time.sleep(1)
def send(self, packet):
self.ws.send_message_to_all(packet)
def close(self):
if not self.closed:
self.ws.server_close()
self.ws.shutdown()
self.closed = True
def do_nothing(self, *args):
pass
def launch_socket(self):
def on_message(client, server, message):
if self.closed:
raise Exception('Socket is already closed...')
if message == '':
return
packet_dict = json.loads(message)
if packet_dict['content']['id'] == 'WORLD_ALIVE':
self.ws.send_message(client, json.dumps({'type': 'conn_success'}))
self.connected = True
elif packet_dict['content']['type'] == 'heartbeat':
pong = packet_dict['content'].copy()
pong['type'] = 'pong'
self.ws.send_message(
client,
json.dumps(
{'type': data_model.SOCKET_ROUTE_PACKET_STRING, 'content': pong}
),
)
if 'receiver_id' in packet_dict['content']:
receiver_id = packet_dict['content']['receiver_id']
use_func = self.handlers.get(receiver_id, self.do_nothing)
use_func(packet_dict['content'])
def on_connect(client, server):
pass
def on_disconnect(client, server):
self.disconnected = True
def run_socket(*args):
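# Bind to the first free port at or above 3030 so concurrent test runs don't
# collide; the port that succeeds is recorded in self.port.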
port = 3030
while self.port is None:
try:
self.ws = WebsocketServer(port, host='127.0.0.1')
self.port = port
except OSError:
port += 1
self.ws.set_fn_client_left(on_disconnect)
self.ws.set_fn_new_client(on_connect)
self.ws.set_fn_message_received(on_message)
self.ws.run_forever()
self.listen_thread = threading.Thread(
target=run_socket, name='Fake-Socket-Thread'
)
self.listen_thread.daemon = True
self.listen_thread.start()
class MockAgent(object):
"""
Class that pretends to be an MTurk agent interacting through the webpage by
simulating the same commands that are sent from the core.html file.
Exposes methods to use for testing and checking status
"""
def __init__(self, hit_id, assignment_id, worker_id, task_group_id):
self.conversation_id = None
self.id = None
self.assignment_id = assignment_id
self.hit_id = hit_id
self.worker_id = worker_id
self.some_agent_disconnected = False
self.disconnected = False
self.task_group_id = task_group_id
self.ws = None
self.always_beat = True
self.send_acks = True
self.ready = False
self.wants_to_send = False
def send_packet(self, packet):
def callback(*args):
pass
event_name = data_model.SOCKET_ROUTE_PACKET_STRING
self.ws.send(json.dumps({'type': event_name, 'content': packet.as_dict()}))
def register_to_socket(self, ws, on_ack, on_hb, on_msg):
handler = self.make_packet_handler(on_ack, on_hb, on_msg)
self.ws = ws
self.ws.handlers[self.worker_id] = handler
def make_packet_handler(self, on_ack, on_hb, on_msg):
"""
A packet handler that properly sends heartbeats.
"""
def handler_mock(pkt):
if pkt['type'] == Packet.TYPE_ACK:
self.ready = True
packet = Packet.from_dict(pkt)
on_ack(packet)
elif pkt['type'] == Packet.TYPE_HEARTBEAT:
packet = Packet.from_dict(pkt)
on_hb(packet)
if self.always_beat:
self.send_heartbeat()
elif pkt['type'] == Packet.TYPE_MESSAGE:
packet = Packet.from_dict(pkt)
if self.send_acks:
self.send_packet(packet.get_ack())
on_msg(packet)
elif pkt['type'] == Packet.TYPE_ALIVE:
raise Exception('Invalid alive packet {}'.format(pkt))
else:
raise Exception(
'Invalid Packet type {} received in {}'.format(pkt['type'], pkt)
)
return handler_mock
def build_and_send_packet(self, packet_type, data):
msg = {
'id': str(uuid.uuid4()),
'type': packet_type,
'sender_id': self.worker_id,
'assignment_id': self.assignment_id,
'conversation_id': self.conversation_id,
'receiver_id': '[World_' + self.task_group_id + ']',
'data': data,
}
event_name = data_model.SOCKET_ROUTE_PACKET_STRING
if packet_type == Packet.TYPE_ALIVE:
event_name = data_model.SOCKET_AGENT_ALIVE_STRING
self.ws.send(json.dumps({'type': event_name, 'content': msg}))
return msg['id']
def send_message(self, text):
data = {
'text': text,
'id': self.id,
'message_id': str(uuid.uuid4()),
'episode_done': False,
}
self.wants_to_send = False
return self.build_and_send_packet(Packet.TYPE_MESSAGE, data)
def send_alive(self):
data = {
'hit_id': self.hit_id,
'assignment_id': self.assignment_id,
'worker_id': self.worker_id,
'conversation_id': self.conversation_id,
}
return self.build_and_send_packet(Packet.TYPE_ALIVE, data)
def send_heartbeat(self):
"""
Sends a heartbeat to the world.
"""
hb = {
'id': str(uuid.uuid4()),
'receiver_id': '[World_' + self.task_group_id + ']',
'assignment_id': self.assignment_id,
'sender_id': self.worker_id,
'conversation_id': self.conversation_id,
'type': Packet.TYPE_HEARTBEAT,
'data': None,
}
self.ws.send(
json.dumps({'type': data_model.SOCKET_ROUTE_PACKET_STRING, 'content': hb})
)
def wait_for_alive(self):
last_time = time.time()
while not self.ready:
self.send_alive()
time.sleep(0.5)
assert (
time.time() - last_time < 10
), 'Timed out waiting for server to acknowledge {} alive'.format(
self.worker_id
)
class TestSocketManagerSetupAndFunctions(unittest.TestCase):
"""
Unit/integration tests for starting up a socket.
"""
def setUp(self):
self.fake_socket = MockSocket()
time.sleep(1)
def tearDown(self):
self.fake_socket.close()
def test_init_and_reg_shutdown(self):
"""
Test initialization of a socket manager.
"""
self.assertFalse(self.fake_socket.connected)
# Callbacks should never trigger during proper setup and shutdown
nop_called = False
def nop(*args):
nonlocal nop_called # noqa 999 we don't support py2
nop_called = True
socket_manager = SocketManager(
'https://127.0.0.1',
self.fake_socket.port,
nop,
nop,
nop,
TASK_GROUP_ID_1,
0.3,
nop,
)
self.assertTrue(self.fake_socket.connected)
self.assertFalse(nop_called)
# Test shutdown
self.assertFalse(self.fake_socket.disconnected)
self.assertFalse(socket_manager.is_shutdown)
self.assertTrue(socket_manager.alive)
socket_manager.shutdown()
self.assertTrue(self.fake_socket.disconnected)
self.assertTrue(socket_manager.is_shutdown)
self.assertFalse(nop_called)
def assertEqualBy(self, val_func, val, max_time):
start_time = time.time()
while val_func() != val:
assert time.time() - start_time < max_time, (
"Value was not attained in specified time, was {} rather "
"than {}".format(val_func(), val)
)
time.sleep(0.1)
@testing_utils.retry()
def test_init_and_socket_shutdown(self):
"""
Test initialization of a socket manager with a failed shutdown.
"""
self.assertFalse(self.fake_socket.connected)
# Callbacks should never trigger during proper setup and shutdown
nop_called = False
def nop(*args):
nonlocal nop_called # noqa 999 we don't support py2
nop_called = True
server_death_called = False
def server_death(*args):
nonlocal server_death_called
server_death_called = True
socket_manager = SocketManager(
'https://127.0.0.1',
self.fake_socket.port,
nop,
nop,
nop,
TASK_GROUP_ID_1,
0.4,
server_death,
)
self.assertTrue(self.fake_socket.connected)
self.assertFalse(nop_called)
self.assertFalse(server_death_called)
# Test shutdown
self.assertFalse(self.fake_socket.disconnected)
self.assertFalse(socket_manager.is_shutdown)
self.assertTrue(socket_manager.alive)
self.fake_socket.close()
self.assertEqualBy(
lambda: socket_manager.alive, False, 8 * socket_manager.HEARTBEAT_RATE
)
self.assertEqualBy(
lambda: server_death_called, True, 4 * socket_manager.HEARTBEAT_RATE
)
self.assertFalse(nop_called)
socket_manager.shutdown()
@testing_utils.retry()
def test_init_and_socket_shutdown_then_restart(self):
"""
Test restoring connection to a socket.
"""
self.assertFalse(self.fake_socket.connected)
# Callbacks should never trigger during proper setup and shutdown
nop_called = False
def nop(*args):
nonlocal nop_called # noqa 999 we don't support py2
nop_called = True
server_death_called = False
def server_death(*args):
nonlocal server_death_called
server_death_called = True
socket_manager = SocketManager(
'https://127.0.0.1',
self.fake_socket.port,
nop,
nop,
nop,
TASK_GROUP_ID_1,
0.4,
server_death,
)
self.assertTrue(self.fake_socket.connected)
self.assertFalse(nop_called)
self.assertFalse(server_death_called)
# Test shutdown
self.assertFalse(self.fake_socket.disconnected)
self.assertFalse(socket_manager.is_shutdown)
self.assertTrue(socket_manager.alive)
self.fake_socket.close()
self.assertEqualBy(
lambda: socket_manager.alive, False, 8 * socket_manager.HEARTBEAT_RATE
)
self.assertFalse(socket_manager.alive)
self.fake_socket = MockSocket()
self.assertEqualBy(
lambda: socket_manager.alive, True, 4 * socket_manager.HEARTBEAT_RATE
)
self.assertFalse(nop_called)
self.assertFalse(server_death_called)
socket_manager.shutdown()
@testing_utils.retry()
def test_init_world_dead(self):
"""
Test initialization of a socket manager with a failed startup.
"""
self.assertFalse(self.fake_socket.connected)
self.fake_socket.close()
# Callbacks should never trigger during proper setup and shutdown
nop_called = False
def nop(*args):
nonlocal nop_called # noqa 999 we don't support py2
nop_called = True
server_death_called = False
def server_death(*args):
nonlocal server_death_called
server_death_called = True
socket_manager = None
with self.assertRaises(ConnectionRefusedError):
socket_manager = SocketManager(
'https://127.0.0.1',
self.fake_socket.port,
nop,
nop,
nop,
TASK_GROUP_ID_1,
0.4,
server_death,
)
self.assertIsNone(socket_manager)
self.assertFalse(nop_called)
self.assertTrue(server_death_called)
class TestSocketManagerRoutingFunctionality(unittest.TestCase):
ID = 'ID'
SENDER_ID = 'SENDER_ID'
ASSIGNMENT_ID = 'ASSIGNMENT_ID'
DATA = 'DATA'
CONVERSATION_ID = 'CONVERSATION_ID'
REQUIRES_ACK = True
BLOCKING = False
ACK_FUNCTION = 'ACK_FUNCTION'
WORLD_ID = '[World_{}]'.format(TASK_GROUP_ID_1)
def on_alive(self, packet):
self.alive_packet = packet
def on_message(self, packet):
self.message_packet = packet
def on_worker_death(self, worker_id, assignment_id):
self.dead_worker_id = worker_id
self.dead_assignment_id = assignment_id
def on_server_death(self):
self.server_died = True
def setUp(self):
self.AGENT_HEARTBEAT_PACKET = Packet(
self.ID,
Packet.TYPE_HEARTBEAT,
self.SENDER_ID,
self.WORLD_ID,
self.ASSIGNMENT_ID,
self.DATA,
self.CONVERSATION_ID,
)
self.AGENT_ALIVE_PACKET = Packet(
MESSAGE_ID_1,
Packet.TYPE_ALIVE,
self.SENDER_ID,
self.WORLD_ID,
self.ASSIGNMENT_ID,
self.DATA,
self.CONVERSATION_ID,
)
self.MESSAGE_SEND_PACKET_1 = Packet(
MESSAGE_ID_2,
Packet.TYPE_MESSAGE,
self.WORLD_ID,
self.SENDER_ID,
self.ASSIGNMENT_ID,
self.DATA,
self.CONVERSATION_ID,
)
self.MESSAGE_SEND_PACKET_2 = Packet(
MESSAGE_ID_3,
Packet.TYPE_MESSAGE,
self.WORLD_ID,
self.SENDER_ID,
self.ASSIGNMENT_ID,
self.DATA,
self.CONVERSATION_ID,
requires_ack=False,
)
self.MESSAGE_SEND_PACKET_3 = Packet(
MESSAGE_ID_4,
Packet.TYPE_MESSAGE,
self.WORLD_ID,
self.SENDER_ID,
self.ASSIGNMENT_ID,
self.DATA,
self.CONVERSATION_ID,
blocking=False,
)
self.fake_socket = MockSocket()
time.sleep(0.3)
self.alive_packet = None
self.message_packet = None
self.dead_worker_id = None
self.dead_assignment_id = None
self.server_died = False
self.socket_manager = SocketManager(
'https://127.0.0.1',
self.fake_socket.port,
self.on_alive,
self.on_message,
self.on_worker_death,
TASK_GROUP_ID_1,
1,
self.on_server_death,
)
def tearDown(self):
self.socket_manager.shutdown()
self.fake_socket.close()
def test_init_state(self):
"""
Ensure all of the initial state of the socket_manager is ready.
"""
self.assertEqual(self.socket_manager.server_url, 'https://127.0.0.1')
self.assertEqual(self.socket_manager.port, self.fake_socket.port)
self.assertEqual(self.socket_manager.alive_callback, self.on_alive)
self.assertEqual(self.socket_manager.message_callback, self.on_message)
self.assertEqual(self.socket_manager.socket_dead_callback, self.on_worker_death)
self.assertEqual(self.socket_manager.task_group_id, TASK_GROUP_ID_1)
self.assertEqual(
self.socket_manager.missed_pongs, 1 + (1 / SocketManager.HEARTBEAT_RATE)
)
self.assertIsNotNone(self.socket_manager.ws)
self.assertTrue(self.socket_manager.keep_running)
self.assertIsNotNone(self.socket_manager.listen_thread)
self.assertDictEqual(self.socket_manager.queues, {})
self.assertDictEqual(self.socket_manager.threads, {})
self.assertDictEqual(self.socket_manager.run, {})
self.assertDictEqual(self.socket_manager.last_sent_heartbeat_time, {})
self.assertDictEqual(self.socket_manager.last_received_heartbeat, {})
self.assertDictEqual(self.socket_manager.pongs_without_heartbeat, {})
self.assertDictEqual(self.socket_manager.packet_map, {})
self.assertTrue(self.socket_manager.alive)
self.assertFalse(self.socket_manager.is_shutdown)
self.assertEqual(self.socket_manager.get_my_sender_id(), self.WORLD_ID)
def test_needed_heartbeat(self):
"""
Ensure needed heartbeat sends heartbeats at the right time.
"""
self.socket_manager._safe_send = mock.MagicMock()
connection_id = self.AGENT_HEARTBEAT_PACKET.get_sender_connection_id()
# Ensure no failure under uninitialized cases
self.socket_manager._send_needed_heartbeat(connection_id)
self.socket_manager.last_received_heartbeat[connection_id] = None
self.socket_manager._send_needed_heartbeat(connection_id)
self.socket_manager._safe_send.assert_not_called()
# assert not called when called too recently
self.socket_manager.last_received_heartbeat[
connection_id
] = self.AGENT_HEARTBEAT_PACKET
self.socket_manager.last_sent_heartbeat_time[connection_id] = time.time() + 10
self.socket_manager._send_needed_heartbeat(connection_id)
self.socket_manager._safe_send.assert_not_called()
# Assert called when supposed to
self.socket_manager.last_sent_heartbeat_time[connection_id] = (
time.time() - SocketManager.HEARTBEAT_RATE
)
self.assertGreater(
time.time() - self.socket_manager.last_sent_heartbeat_time[connection_id],
SocketManager.HEARTBEAT_RATE,
)
self.socket_manager._send_needed_heartbeat(connection_id)
self.assertLess(
time.time() - self.socket_manager.last_sent_heartbeat_time[connection_id],
SocketManager.HEARTBEAT_RATE,
)
used_packet_json = self.socket_manager._safe_send.call_args[0][0]
used_packet_dict = json.loads(used_packet_json)
self.assertEqual(
used_packet_dict['type'], data_model.SOCKET_ROUTE_PACKET_STRING
)
used_packet = Packet.from_dict(used_packet_dict['content'])
self.assertNotEqual(self.AGENT_HEARTBEAT_PACKET.id, used_packet.id)
self.assertEqual(used_packet.type, Packet.TYPE_HEARTBEAT)
self.assertEqual(used_packet.sender_id, self.WORLD_ID)
self.assertEqual(used_packet.receiver_id, self.SENDER_ID)
self.assertEqual(used_packet.assignment_id, self.ASSIGNMENT_ID)
self.assertEqual(used_packet.data, '')
self.assertEqual(used_packet.conversation_id, self.CONVERSATION_ID)
self.assertEqual(used_packet.requires_ack, False)
self.assertEqual(used_packet.blocking, False)
def test_ack_send(self):
"""
Ensure acks are being properly created and sent.
"""
self.socket_manager._safe_send = mock.MagicMock()
self.socket_manager._send_ack(self.AGENT_ALIVE_PACKET)
used_packet_json = self.socket_manager._safe_send.call_args[0][0]
used_packet_dict = json.loads(used_packet_json)
self.assertEqual(
used_packet_dict['type'], data_model.SOCKET_ROUTE_PACKET_STRING
)
used_packet = Packet.from_dict(used_packet_dict['content'])
self.assertEqual(self.AGENT_ALIVE_PACKET.id, used_packet.id)
self.assertEqual(used_packet.type, Packet.TYPE_ACK)
self.assertEqual(used_packet.sender_id, self.WORLD_ID)
self.assertEqual(used_packet.receiver_id, self.SENDER_ID)
self.assertEqual(used_packet.assignment_id, self.ASSIGNMENT_ID)
self.assertEqual(used_packet.conversation_id, self.CONVERSATION_ID)
self.assertEqual(used_packet.requires_ack, False)
self.assertEqual(used_packet.blocking, False)
self.assertEqual(self.AGENT_ALIVE_PACKET.status, Packet.STATUS_SENT)
def _send_packet_in_background(self, packet, send_time):
"""
Creates a background thread that performs the packet send so the test thread doesn't block.
"""
def do_send():
self.socket_manager._send_packet(
packet, packet.get_receiver_connection_id(), send_time
)
self.sent = True
send_thread = threading.Thread(target=do_send, daemon=True)
send_thread.start()
time.sleep(0.02)
def test_blocking_ack_packet_send(self):
"""
Checks to see if ack'ed blocking packets are working properly.
"""
self.socket_manager._safe_send = mock.MagicMock()
self.socket_manager._safe_put = mock.MagicMock()
self.sent = False
# Test a blocking acknowledged packet
send_time = time.time()
self.assertEqual(self.MESSAGE_SEND_PACKET_1.status, Packet.STATUS_INIT)
self._send_packet_in_background(self.MESSAGE_SEND_PACKET_1, send_time)
self.assertEqual(self.MESSAGE_SEND_PACKET_1.status, Packet.STATUS_SENT)
self.socket_manager._safe_send.assert_called_once()
connection_id = self.MESSAGE_SEND_PACKET_1.get_receiver_connection_id()
self.socket_manager._safe_put.assert_called_once_with(
connection_id, (send_time, self.MESSAGE_SEND_PACKET_1)
)
self.assertTrue(self.sent)
self.socket_manager._safe_send.reset_mock()
self.socket_manager._safe_put.reset_mock()
# Send it again - end outcome should be a call to send only
# with sent set
self.MESSAGE_SEND_PACKET_1.status = Packet.STATUS_ACK
self._send_packet_in_background(self.MESSAGE_SEND_PACKET_1, send_time)
self.socket_manager._safe_send.assert_not_called()
self.socket_manager._safe_put.assert_not_called()
def test_non_blocking_ack_packet_send(self):
"""
Checks to see if ack'ed non-blocking packets are working.
"""
self.socket_manager._safe_send = mock.MagicMock()
self.socket_manager._safe_put = mock.MagicMock()
self.sent = False
# Test a non-blocking acknowledged packet
send_time = time.time()
self.assertEqual(self.MESSAGE_SEND_PACKET_3.status, Packet.STATUS_INIT)
self._send_packet_in_background(self.MESSAGE_SEND_PACKET_3, send_time)
self.assertEqual(self.MESSAGE_SEND_PACKET_3.status, Packet.STATUS_SENT)
self.socket_manager._safe_send.assert_called_once()
self.socket_manager._safe_put.assert_called_once()
self.assertTrue(self.sent)
call_args = self.socket_manager._safe_put.call_args[0]
connection_id = call_args[0]
queue_item = call_args[1]
self.assertEqual(
connection_id, self.MESSAGE_SEND_PACKET_3.get_receiver_connection_id()
)
expected_send_time = (
send_time + SocketManager.ACK_TIME[self.MESSAGE_SEND_PACKET_3.type]
)
self.assertAlmostEqual(queue_item[0], expected_send_time, places=2)
self.assertEqual(queue_item[1], self.MESSAGE_SEND_PACKET_3)
used_packet_json = self.socket_manager._safe_send.call_args[0][0]
used_packet_dict = json.loads(used_packet_json)
self.assertEqual(
used_packet_dict['type'], data_model.SOCKET_ROUTE_PACKET_STRING
)
self.assertDictEqual(
used_packet_dict['content'], self.MESSAGE_SEND_PACKET_3.as_dict()
)
def test_non_ack_packet_send(self):
"""
Checks to see if non-ack'ed packets are working.
"""
self.socket_manager._safe_send = mock.MagicMock()
self.socket_manager._safe_put = mock.MagicMock()
self.sent = False
# Test a packet that does not require an ack
send_time = time.time()
self.assertEqual(self.MESSAGE_SEND_PACKET_2.status, Packet.STATUS_INIT)
self._send_packet_in_background(self.MESSAGE_SEND_PACKET_2, send_time)
self.assertEqual(self.MESSAGE_SEND_PACKET_2.status, Packet.STATUS_SENT)
self.socket_manager._safe_send.assert_called_once()
self.socket_manager._safe_put.assert_not_called()
self.assertTrue(self.sent)
used_packet_json = self.socket_manager._safe_send.call_args[0][0]
used_packet_dict = json.loads(used_packet_json)
self.assertEqual(
used_packet_dict['type'], data_model.SOCKET_ROUTE_PACKET_STRING
)
self.assertDictEqual(
used_packet_dict['content'], self.MESSAGE_SEND_PACKET_2.as_dict()
)
def test_simple_packet_channel_management(self):
"""
Ensure that channels are created, managed, and then removed as expected.
"""
self.socket_manager._safe_put = mock.MagicMock()
use_packet = self.MESSAGE_SEND_PACKET_1
worker_id = use_packet.receiver_id
assignment_id = use_packet.assignment_id
# Open a channel and assert it is there
self.socket_manager.open_channel(worker_id, assignment_id)
time.sleep(0.1)
connection_id = use_packet.get_receiver_connection_id()
self.assertTrue(self.socket_manager.run[connection_id])
self.assertIsNotNone(self.socket_manager.queues[connection_id])
self.assertEqual(self.socket_manager.last_sent_heartbeat_time[connection_id], 0)
self.assertEqual(self.socket_manager.pongs_without_heartbeat[connection_id], 0)
self.assertIsNone(self.socket_manager.last_received_heartbeat[connection_id])
self.assertTrue(self.socket_manager.socket_is_open(connection_id))
self.assertFalse(self.socket_manager.socket_is_open(FAKE_ID))
# Send a bad packet, ensure it is ignored
resp = self.socket_manager.queue_packet(self.AGENT_ALIVE_PACKET)
self.socket_manager._safe_put.assert_not_called()
self.assertFalse(resp)
self.assertNotIn(self.AGENT_ALIVE_PACKET.id, self.socket_manager.packet_map)
# Send a packet to an open socket, ensure it got queued
resp = self.socket_manager.queue_packet(use_packet)
self.socket_manager._safe_put.assert_called_once()
self.assertIn(use_packet.id, self.socket_manager.packet_map)
self.assertTrue(resp)
# Assert we can get the status of a packet in the map, but not
# existing doesn't throw an error
self.assertEqual(
self.socket_manager.get_status(use_packet.id), use_packet.status
)
self.assertEqual(self.socket_manager.get_status(FAKE_ID), Packet.STATUS_NONE)
# Assert that closing a thread does the correct cleanup work
self.socket_manager.close_channel(connection_id)
time.sleep(0.2)
self.assertFalse(self.socket_manager.run[connection_id])
self.assertNotIn(connection_id, self.socket_manager.queues)
self.assertNotIn(connection_id, self.socket_manager.threads)
self.assertNotIn(use_packet.id, self.socket_manager.packet_map)
# Assert that opening multiple threads and closing them is possible
self.socket_manager.open_channel(worker_id, assignment_id)
self.socket_manager.open_channel(worker_id + '2', assignment_id)
time.sleep(0.1)
self.assertEqual(len(self.socket_manager.queues), 2)
self.socket_manager.close_all_channels()
time.sleep(0.1)
self.assertEqual(len(self.socket_manager.queues), 0)
def test_safe_put(self):
"""
Test safe put and queue retrieval mechanisms.
"""
self.socket_manager._send_packet = mock.MagicMock()
use_packet = self.MESSAGE_SEND_PACKET_1
worker_id = use_packet.receiver_id
assignment_id = use_packet.assignment_id
connection_id = use_packet.get_receiver_connection_id()
# Open a channel and assert it is there
self.socket_manager.open_channel(worker_id, assignment_id)
send_time = time.time()
self.socket_manager._safe_put(connection_id, (send_time, use_packet))
# Wait for the sending thread to try to pull the packet from the queue
time.sleep(0.3)
# Ensure the right packet was popped and sent.
self.socket_manager._send_packet.assert_called_once()
call_args = self.socket_manager._send_packet.call_args[0]
self.assertEqual(use_packet, call_args[0])
self.assertEqual(connection_id, call_args[1])
self.assertEqual(send_time, call_args[2])
self.socket_manager.close_all_channels()
time.sleep(0.1)
self.socket_manager._safe_put(connection_id, (send_time, use_packet))
self.assertEqual(use_packet.status, Packet.STATUS_FAIL)
class TestSocketManagerMessageHandling(unittest.TestCase):
"""
Test sending messages to the world and then to each of two agents, along with
failure cases for each.
"""
def on_alive(self, packet):
self.alive_packet = packet
self.socket_manager.open_channel(packet.sender_id, packet.assignment_id)
def on_message(self, packet):
self.message_packet = packet
def on_worker_death(self, worker_id, assignment_id):
self.dead_worker_id = worker_id
self.dead_assignment_id = assignment_id
def on_server_death(self):
self.server_died = True
def assertEqualBy(self, val_func, val, max_time):
start_time = time.time()
while val_func() != val:
assert (
time.time() - start_time < max_time
), "Value was not attained in specified time"
time.sleep(0.1)
def setUp(self):
self.fake_socket = MockSocket()
time.sleep(0.3)
self.agent1 = MockAgent(
TEST_HIT_ID_1, TEST_ASSIGNMENT_ID_1, TEST_WORKER_ID_1, TASK_GROUP_ID_1
)
self.agent2 = MockAgent(
TEST_HIT_ID_2, TEST_ASSIGNMENT_ID_2, TEST_WORKER_ID_2, TASK_GROUP_ID_1
)
self.alive_packet = None
self.message_packet = None
self.dead_worker_id = None
self.dead_assignment_id = None
self.server_died = False
self.socket_manager = SocketManager(
'https://127.0.0.1',
self.fake_socket.port,
self.on_alive,
self.on_message,
self.on_worker_death,
TASK_GROUP_ID_1,
1,
self.on_server_death,
)
def tearDown(self):
self.socket_manager.shutdown()
self.fake_socket.close()
@testing_utils.retry()
def test_alive_send_and_disconnect(self):
acked_packet = None
incoming_hb = None
message_packet = None
hb_count = 0
def on_ack(*args):
nonlocal acked_packet
acked_packet = args[0]
def on_hb(*args):
nonlocal incoming_hb, hb_count
incoming_hb = args[0]
hb_count += 1
def on_msg(*args):
nonlocal message_packet
message_packet = args[0]
self.agent1.register_to_socket(self.fake_socket, on_ack, on_hb, on_msg)
self.assertIsNone(acked_packet)
self.assertIsNone(incoming_hb)
self.assertIsNone(message_packet)
self.assertEqual(hb_count, 0)
# Assert alive is registered
alive_id = self.agent1.send_alive()
self.assertEqualBy(lambda: acked_packet is None, False, TIMEOUT_VERIFICATION)
self.assertIsNone(incoming_hb)
self.assertIsNone(message_packet)
self.assertIsNone(self.message_packet)
self.assertEqualBy(
lambda: self.alive_packet is None, False, TIMEOUT_VERIFICATION
)
self.assertEqual(self.alive_packet.id, alive_id)
self.assertEqual(acked_packet.id, alive_id, 'Alive was not acked')
acked_packet = None
# assert sending heartbeats actually works, and that heartbeats don't
# get acked
self.agent1.send_heartbeat()
self.assertEqualBy(lambda: incoming_hb is None, False, TIMEOUT_VERIFICATION)
self.assertIsNone(acked_packet)
self.assertGreater(hb_count, 0)
# Test message send from agent
test_message_text_1 = 'test_message_text_1'
msg_id = self.agent1.send_message(test_message_text_1)
self.assertEqualBy(
lambda: self.message_packet is None, False, TIMEOUT_VERIFICATION
)
self.assertEqualBy(lambda: acked_packet is None, False, TIMEOUT_VERIFICATION)
self.assertEqual(self.message_packet.id, acked_packet.id)
self.assertEqual(self.message_packet.id, msg_id)
self.assertEqual(self.message_packet.data['text'], test_message_text_1)
# Test message send to agent
manager_message_id = 'message_id_from_manager'
test_message_text_2 = 'test_message_text_2'
message_send_packet = Packet(
manager_message_id,
Packet.TYPE_MESSAGE,
self.socket_manager.get_my_sender_id(),
TEST_WORKER_ID_1,
TEST_ASSIGNMENT_ID_1,
test_message_text_2,
't2',
)
self.socket_manager.queue_packet(message_send_packet)
self.assertEqualBy(lambda: message_packet is None, False, TIMEOUT_VERIFICATION)
self.assertEqual(message_packet.id, manager_message_id)
self.assertEqual(message_packet.data, test_message_text_2)
self.assertIn(manager_message_id, self.socket_manager.packet_map)
self.assertEqualBy(
lambda: self.socket_manager.packet_map[manager_message_id].status,
Packet.STATUS_ACK,
6,
)
# Test agent disconnect
self.agent1.always_beat = False
self.assertEqualBy(
lambda: self.dead_worker_id, TEST_WORKER_ID_1, TIMEOUT_VERIFICATION
)
self.assertEqual(self.dead_assignment_id, TEST_ASSIGNMENT_ID_1)
self.assertGreater(hb_count, 1)
@testing_utils.retry()
def test_failed_ack_resend(self):
"""
Ensures when a message from the manager is dropped, it gets retried until it
works as long as there hasn't been a disconnect.
"""
acked_packet = None
incoming_hb = None
message_packet = None
hb_count = 0
def on_ack(*args):
nonlocal acked_packet
acked_packet = args[0]
def on_hb(*args):
nonlocal incoming_hb, hb_count
incoming_hb = args[0]
hb_count += 1
def on_msg(*args):
nonlocal message_packet
message_packet = args[0]
self.agent1.register_to_socket(self.fake_socket, on_ack, on_hb, on_msg)
self.assertIsNone(acked_packet)
self.assertIsNone(incoming_hb)
self.assertIsNone(message_packet)
self.assertEqual(hb_count, 0)
# Assert alive is registered
alive_id = self.agent1.send_alive()
self.assertEqualBy(lambda: acked_packet is None, False, TIMEOUT_VERIFICATION)
self.assertIsNone(incoming_hb)
self.assertIsNone(message_packet)
self.assertIsNone(self.message_packet)
self.assertEqualBy(
lambda: self.alive_packet is None, False, TIMEOUT_VERIFICATION
)
self.assertEqual(self.alive_packet.id, alive_id)
self.assertEqual(acked_packet.id, alive_id, 'Alive was not acked')
acked_packet = None
# assert sending heartbeats actually works, and that heartbeats don't
# get acked
self.agent1.send_heartbeat()
self.assertEqualBy(lambda: incoming_hb is None, False, TIMEOUT_VERIFICATION)
self.assertIsNone(acked_packet)
self.assertGreater(hb_count, 0)
# Test message send to agent
manager_message_id = 'message_id_from_manager'
test_message_text_2 = 'test_message_text_2'
self.agent1.send_acks = False
message_send_packet = Packet(
manager_message_id,
Packet.TYPE_MESSAGE,
self.socket_manager.get_my_sender_id(),
TEST_WORKER_ID_1,
TEST_ASSIGNMENT_ID_1,
test_message_text_2,
't2',
)
self.socket_manager.queue_packet(message_send_packet)
self.assertEqualBy(lambda: message_packet is None, False, TIMEOUT_VERIFICATION)
self.assertEqual(message_packet.id, manager_message_id)
self.assertEqual(message_packet.data, test_message_text_2)
self.assertIn(manager_message_id, self.socket_manager.packet_map)
self.assertNotEqual(
self.socket_manager.packet_map[manager_message_id].status, Packet.STATUS_ACK
)
message_packet = None
self.agent1.send_acks = True
self.assertEqualBy(lambda: message_packet is None, False, TIMEOUT_VERIFICATION)
self.assertEqual(message_packet.id, manager_message_id)
self.assertEqual(message_packet.data, test_message_text_2)
self.assertIn(manager_message_id, self.socket_manager.packet_map)
self.assertEqualBy(
lambda: self.socket_manager.packet_map[manager_message_id].status,
Packet.STATUS_ACK,
6,
)
@testing_utils.retry()
def test_one_agent_disconnect_other_alive(self):
acked_packet = None
incoming_hb = None
message_packet = None
hb_count = 0
def on_ack(*args):
nonlocal acked_packet
acked_packet = args[0]
def on_hb(*args):
nonlocal incoming_hb, hb_count
incoming_hb = args[0]
hb_count += 1
def on_msg(*args):
nonlocal message_packet
message_packet = args[0]
self.agent1.register_to_socket(self.fake_socket, on_ack, on_hb, on_msg)
self.agent2.register_to_socket(self.fake_socket, on_ack, on_hb, on_msg)
self.assertIsNone(acked_packet)
self.assertIsNone(incoming_hb)
self.assertIsNone(message_packet)
self.assertEqual(hb_count, 0)
# Assert alive is registered
self.agent1.send_alive()
self.agent2.send_alive()
self.assertEqualBy(lambda: acked_packet is None, False, TIMEOUT_VERIFICATION)
self.assertIsNone(incoming_hb)
self.assertIsNone(message_packet)
# Start sending heartbeats
self.agent1.send_heartbeat()
self.agent2.send_heartbeat()
# Kill second agent
self.agent2.always_beat = False
self.assertEqualBy(
lambda: self.dead_worker_id, TEST_WORKER_ID_2, TIMEOUT_VERIFICATION
)
self.assertEqual(self.dead_assignment_id, TEST_ASSIGNMENT_ID_2)
# Run rest of tests
# Test message send from agent
acked_packet = None
test_message_text_1 = 'test_message_text_1'
msg_id = self.agent1.send_message(test_message_text_1)
self.assertEqualBy(
lambda: self.message_packet is None, False, TIMEOUT_VERIFICATION
)
self.assertEqualBy(lambda: acked_packet is None, False, TIMEOUT_VERIFICATION)
self.assertEqual(
self.message_packet.id,
acked_packet.id,
'Packet {} was not the expected acked packet {}'.format(
self.message_packet, acked_packet
),
)
self.assertEqual(self.message_packet.id, msg_id)
self.assertEqual(self.message_packet.data['text'], test_message_text_1)
# Test message send to agent
manager_message_id = 'message_id_from_manager'
test_message_text_2 = 'test_message_text_2'
message_send_packet = Packet(
manager_message_id,
Packet.TYPE_MESSAGE,
self.socket_manager.get_my_sender_id(),
TEST_WORKER_ID_1,
TEST_ASSIGNMENT_ID_1,
test_message_text_2,
't2',
)
self.socket_manager.queue_packet(message_send_packet)
self.assertEqualBy(lambda: message_packet is None, False, TIMEOUT_VERIFICATION)
self.assertEqual(message_packet.id, manager_message_id)
self.assertEqual(message_packet.data, test_message_text_2)
self.assertIn(manager_message_id, self.socket_manager.packet_map)
self.assertEqualBy(
lambda: self.socket_manager.packet_map[manager_message_id].status,
Packet.STATUS_ACK,
6,
)
# Test agent disconnect
self.agent1.always_beat = False
self.assertEqualBy(
lambda: self.dead_worker_id, TEST_WORKER_ID_1, TIMEOUT_VERIFICATION
)
self.assertEqual(self.dead_assignment_id, TEST_ASSIGNMENT_ID_1)
if __name__ == '__main__':
unittest.main(buffer=True)
|
testutils.py
|
from __future__ import print_function
import os
import sys
from types import TracebackType
import isodate
import datetime
import random
from contextlib import AbstractContextManager, contextmanager
from typing import (
Iterable,
List,
Optional,
TYPE_CHECKING,
Type,
Iterator,
Set,
Tuple,
Dict,
Any,
TypeVar,
cast,
NamedTuple,
)
from urllib.parse import ParseResult, urlparse, parse_qs
from traceback import print_exc
from threading import Thread
from http.server import BaseHTTPRequestHandler, HTTPServer, SimpleHTTPRequestHandler
import email.message
from nose import SkipTest
from .earl import add_test, report
import unittest
from rdflib import BNode, Graph, ConjunctiveGraph
from rdflib.term import Node
from unittest.mock import MagicMock, Mock
from urllib.error import HTTPError
from urllib.request import urlopen
if TYPE_CHECKING:
import typing_extensions as te
# TODO: make an introspective version (like this one) of
# rdflib.graphutils.isomorphic and use instead.
from test import TEST_DIR
from test.earl import add_test, report
def crapCompare(g1, g2):
"""A really crappy way to 'check' if two graphs are equal. It ignores blank
nodes completely and ignores subgraphs."""
if len(g1) != len(g2):
raise Exception("Graphs dont have same length")
for t in g1:
s = _no_blank(t[0])
o = _no_blank(t[2])
if not (s, t[1], o) in g2:
e = "(%s, %s, %s) is not in both graphs!" % (s, t[1], o)
raise Exception(e)
def _no_blank(node):
if isinstance(node, BNode):
return None
if isinstance(node, Graph):
return None # node._Graph__identifier = _SQUASHED_NODE
return node
def check_serialize_parse(fpath, infmt, testfmt, verbose=False):
g = ConjunctiveGraph()
_parse_or_report(verbose, g, fpath, format=infmt)
if verbose:
for t in g:
print(t)
print("========================================")
print("Parsed OK!")
s = g.serialize(format=testfmt)
if verbose:
print(s)
g2 = ConjunctiveGraph()
_parse_or_report(verbose, g2, data=s, format=testfmt)
if verbose:
print(g2.serialize())
crapCompare(g, g2)
def _parse_or_report(verbose, graph, *args, **kwargs):
try:
graph.parse(*args, **kwargs)
except:
if verbose:
print("========================================")
print("Error in parsing serialization:")
print(args, kwargs)
raise
def nose_tst_earl_report(generator, earl_report_name=None):
from optparse import OptionParser
p = OptionParser()
(options, args) = p.parse_args()
skip = 0
tests = 0
success = 0
for t in generator(args):
tests += 1
print("Running ", t[1].uri)
try:
t[0](t[1])
add_test(t[1].uri, "passed")
success += 1
except SkipTest as e:
add_test(t[1].uri, "untested", e.message)
print("skipping %s - %s" % (t[1].uri, e.message))
skip += 1
except KeyboardInterrupt:
raise
except AssertionError:
add_test(t[1].uri, "failed")
except:
add_test(t[1].uri, "failed", "error")
print_exc()
sys.stderr.write("%s\n" % t[1].uri)
print(
"Ran %d tests, %d skipped, %d failed. " % (tests, skip, tests - skip - success)
)
if earl_report_name:
now = isodate.datetime_isoformat(datetime.datetime.utcnow())
earl_report = os.path.join(TEST_DIR, "../test_reports/%s-%s.ttl" % (
earl_report_name,
now.replace(":", ""),
))
report.serialize(earl_report, format="n3")
report.serialize(os.path.join(TEST_DIR, "../test_reports/%s-latest.ttl" % earl_report_name), format="n3")
print("Wrote EARL-report to '%s'" % earl_report)
def get_random_ip(parts: List[str] = None) -> str:
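# Build a loopback address (starting from the given parts, default "127") with
# random remaining octets so each test HTTP server gets its own host address.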
if parts is None:
parts = ["127"]
for _ in range(4 - len(parts)):
parts.append(f"{random.randint(0, 255)}")
return ".".join(parts)
@contextmanager
def ctx_http_server(handler: Type[BaseHTTPRequestHandler]) -> Iterator[HTTPServer]:
host = get_random_ip()
server = HTTPServer((host, 0), handler)
server_thread = Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
yield server
server.shutdown()
server.socket.close()
server_thread.join()
class GraphHelper:
@classmethod
def triple_set(cls, graph: Graph) -> Set[Tuple[Node, Node, Node]]:
return set(graph.triples((None, None, None)))
@classmethod
def triple_sets(cls, graphs: Iterable[Graph]) -> List[Set[Tuple[Node, Node, Node]]]:
result: List[Set[Tuple[Node, Node, Node]]] = []
for graph in graphs:
result.append(cls.triple_set(graph))
return result
@classmethod
def equals(cls, lhs: Graph, rhs: Graph) -> bool:
return cls.triple_set(lhs) == cls.triple_set(rhs)
GenericT = TypeVar("GenericT", bound=Any)
def make_spypair(method: GenericT) -> Tuple[GenericT, Mock]:
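# Returns (wrapped_method, mock): the wrapper forwards to the original method
# while also recording every call on a MagicMock, so tests can assert on call
# counts and arguments.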
m = MagicMock()
def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
m(*args, **kwargs)
return method(self, *args, **kwargs)
setattr(wrapper, "mock", m)
return cast(GenericT, wrapper), m
HeadersT = Dict[str, List[str]]
PathQueryT = Dict[str, List[str]]
class MockHTTPRequests(NamedTuple):
method: str
path: str
parsed_path: ParseResult
path_query: PathQueryT
headers: email.message.Message
class MockHTTPResponse(NamedTuple):
status_code: int
reason_phrase: str
body: bytes
headers: HeadersT
class SimpleHTTPMock:
"""
SimpleHTTPMock allows testing of code that relies on an HTTP server.
NOTE: Currently only the GET and POST methods are supported.
Objects of this class have a list of responses for each method (GET, POST, etc...)
and return these responses for that method in sequence.
All requests received are appended to a method-specific list.
Example usage:
>>> httpmock = SimpleHTTPMock()
>>> with ctx_http_server(httpmock.Handler) as server:
... url = "http://{}:{}".format(*server.server_address)
... # add a response the server should give:
... httpmock.do_get_responses.append(
... MockHTTPResponse(404, "Not Found", b"gone away", {})
... )
...
... # send a request to get the first response
... http_error: Optional[HTTPError] = None
... try:
... urlopen(f"{url}/bad/path")
... except HTTPError as caught:
... http_error = caught
...
... assert http_error is not None
... assert http_error.code == 404
...
... # get and validate request that the mock received
... req = httpmock.do_get_requests.pop(0)
... assert req.path == "/bad/path"
"""
# TODO: add additional methods (PUT, PATCH, ...) similar to GET and POST
def __init__(self):
self.do_get_requests: List[MockHTTPRequests] = []
self.do_get_responses: List[MockHTTPResponse] = []
self.do_post_requests: List[MockHTTPRequests] = []
self.do_post_responses: List[MockHTTPResponse] = []
_http_mock = self
class Handler(SimpleHTTPRequestHandler):
http_mock = _http_mock
def _do_GET(self):
parsed_path = urlparse(self.path)
path_query = parse_qs(parsed_path.query)
request = MockHTTPRequests(
"GET", self.path, parsed_path, path_query, self.headers
)
self.http_mock.do_get_requests.append(request)
response = self.http_mock.do_get_responses.pop(0)
self.send_response(response.status_code, response.reason_phrase)
for header, values in response.headers.items():
for value in values:
self.send_header(header, value)
self.end_headers()
self.wfile.write(response.body)
self.wfile.flush()
return
(do_GET, do_GET_mock) = make_spypair(_do_GET)
def _do_POST(self):
parsed_path = urlparse(self.path)
path_query = parse_qs(parsed_path.query)
request = MockHTTPRequests(
"POST", self.path, parsed_path, path_query, self.headers
)
self.http_mock.do_post_requests.append(request)
response = self.http_mock.do_post_responses.pop(0)
self.send_response(response.status_code, response.reason_phrase)
for header, values in response.headers.items():
for value in values:
self.send_header(header, value)
self.end_headers()
self.wfile.write(response.body)
self.wfile.flush()
return
(do_POST, do_POST_mock) = make_spypair(_do_POST)
def log_message(self, format: str, *args: Any) -> None:
pass
self.Handler = Handler
self.do_get_mock = Handler.do_GET_mock
self.do_post_mock = Handler.do_POST_mock
def reset(self):
self.do_get_requests.clear()
self.do_get_responses.clear()
self.do_get_mock.reset_mock()
self.do_post_requests.clear()
self.do_post_responses.clear()
self.do_post_mock.reset_mock()
@property
def call_count(self):
return self.do_post_mock.call_count + self.do_get_mock.call_count
class SimpleHTTPMockTests(unittest.TestCase):
def test_example(self) -> None:
httpmock = SimpleHTTPMock()
with ctx_http_server(httpmock.Handler) as server:
url = "http://{}:{}".format(*server.server_address)
# add two responses the server should give:
httpmock.do_get_responses.append(
MockHTTPResponse(404, "Not Found", b"gone away", {})
)
httpmock.do_get_responses.append(
MockHTTPResponse(200, "OK", b"here it is", {})
)
# send a request to get the first response
with self.assertRaises(HTTPError) as raised:
urlopen(f"{url}/bad/path")
assert raised.exception.code == 404
# get and validate request that the mock received
req = httpmock.do_get_requests.pop(0)
self.assertEqual(req.path, "/bad/path")
# send a request to get the second response
resp = urlopen(f"{url}/")
self.assertEqual(resp.status, 200)
self.assertEqual(resp.read(), b"here it is")
httpmock.do_get_responses.append(
MockHTTPResponse(404, "Not Found", b"gone away", {})
)
httpmock.do_get_responses.append(
MockHTTPResponse(200, "OK", b"here it is", {})
)
class ServedSimpleHTTPMock(SimpleHTTPMock, AbstractContextManager):
"""
ServedSimpleHTTPMock is a SimpleHTTPMock that runs its own HTTP server.
Example usage:
>>> with ServedSimpleHTTPMock() as httpmock:
... # add a response the server should give:
... httpmock.do_get_responses.append(
... MockHTTPResponse(404, "Not Found", b"gone away", {})
... )
...
... # send a request to get the first response
... http_error: Optional[HTTPError] = None
... try:
... urlopen(f"{httpmock.url}/bad/path")
... except HTTPError as caught:
... http_error = caught
...
... assert http_error is not None
... assert http_error.code == 404
...
... # get and validate request that the mock received
... req = httpmock.do_get_requests.pop(0)
... assert req.path == "/bad/path"
"""
def __init__(self):
super().__init__()
host = get_random_ip()
self.server = HTTPServer((host, 0), self.Handler)
self.server_thread = Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def stop(self) -> None:
self.server.shutdown()
self.server.socket.close()
self.server_thread.join()
@property
def address_string(self) -> str:
(host, port) = self.server.server_address
return f"{host}:{port}"
@property
def url(self) -> str:
return f"http://{self.address_string}"
def __enter__(self) -> "ServedSimpleHTTPMock":
return self
def __exit__(
self,
__exc_type: Optional[Type[BaseException]],
__exc_value: Optional[BaseException],
__traceback: Optional[TracebackType],
) -> "te.Literal[False]":
self.stop()
return False
class ServedSimpleHTTPMockTests(unittest.TestCase):
def test_example(self) -> None:
with ServedSimpleHTTPMock() as httpmock:
# add two responses the server should give:
httpmock.do_get_responses.append(
MockHTTPResponse(404, "Not Found", b"gone away", {})
)
httpmock.do_get_responses.append(
MockHTTPResponse(200, "OK", b"here it is", {})
)
# send a request to get the first response
with self.assertRaises(HTTPError) as raised:
urlopen(f"{httpmock.url}/bad/path")
assert raised.exception.code == 404
# get and validate request that the mock received
req = httpmock.do_get_requests.pop(0)
self.assertEqual(req.path, "/bad/path")
# send a request to get the second response
resp = urlopen(f"{httpmock.url}/")
self.assertEqual(resp.status, 200)
self.assertEqual(resp.read(), b"here it is")
httpmock.do_get_responses.append(
MockHTTPResponse(404, "Not Found", b"gone away", {})
)
httpmock.do_get_responses.append(
MockHTTPResponse(200, "OK", b"here it is", {})
)
|
process_impl.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import atexit
import traceback
import multiprocessing
class Process(object):
"""
Wraps a `gym.Env` to host the environment in an external process.
Example:
```
env = Process(lambda: gym.make('Pendulum-v0'))
```
Args:
constructor: Constructor which returns a `gym.Env`.
"""
_ACCESS = 1
_CALL = 2
_RESULT = 3
_EXCEPTION = 4
_CLOSE = 5
def __init__(self, constructor):
self._conn, conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=self._worker, args=(constructor, conn)
)
atexit.register(self.close)
self._process.start()
self._observation_space = None
self._action_space = None
@property
def observation_space(self):
if self._observation_space is None:
self._observation_space = self.__getattr__("observation_space")
return self._observation_space
@property
def action_space(self):
if self._action_space is None:
self._action_space = self.__getattr__("action_space")
return self._action_space
def __getattr__(self, name):
self._conn.send((self._ACCESS, name))
return self._receive()
def call(self, name, *args, **kwargs):
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
# Block until the worker process sends back the result for this call.
return self._receive()
def close(self):
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# connection already closed
pass
self._process.join()
def seed(self, seed):
return self.call("seed", seed)
def step(self, action):
return self.call("step", action)
def reset(self):
return self.call("reset")
def _receive(self):
message, payload = self._conn.recv()
# re-raise exceptions in the main process
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
raise KeyError("Received message of unexpected type {}".format(message))
def _worker(self, constructor, conn):
try:
env = constructor()
while True:
try:
# only block for short times to support keyboard exceptions
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
assert payload is None
break
raise KeyError("Received message of unknown type {}".format(message))
except Exception:
stacktrace = "".join(traceback.format_exception(*sys.exc_info()))
conn.send((self._EXCEPTION, stacktrace))
finally:
conn.close()
|
test_data_node_scale.py
|
import threading
import time
import pytest
from base.collection_wrapper import ApiCollectionWrapper
from common.common_type import CaseLabel
from common import common_func as cf
from customize.milvus_operator import MilvusOperator
from scale import constants
from pymilvus import connections
from utils.util_log import test_log as log
from utils.util_k8s import wait_pods_ready
prefix = "data_scale"
default_schema = cf.gen_default_collection_schema()
default_search_exp = "int64 >= 0"
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
class TestDataNodeScale:
@pytest.mark.tags(CaseLabel.L3)
def test_scale_data_node(self):
"""
target: verify dataNode replicas can be scaled up and down while data is being inserted
method: deploy with 2 dataNodes, insert continuously, expand to 5 replicas, then shrink to 3
expected: the cluster stays healthy and inserts keep succeeding through both scale operations
"""
release_name = "scale-data"
image = f'{constants.IMAGE_REPOSITORY}:{constants.IMAGE_TAG}'
data_config = {
'metadata.namespace': constants.NAMESPACE,
'metadata.name': release_name,
'spec.components.image': image,
'spec.components.proxy.serviceType': 'LoadBalancer',
'spec.components.dataNode.replicas': 2,
'spec.config.dataCoord.enableCompaction': True,
'spec.config.dataCoord.enableGarbageCollection': True
}
mic = MilvusOperator()
mic.install(data_config)
healthy = mic.wait_for_healthy(release_name, constants.NAMESPACE, timeout=1200)
log.info(f"milvus healthy: {healthy}")
host = mic.endpoint(release_name, constants.NAMESPACE).split(':')[0]
# host = '10.98.0.4'
# connect
connections.add_connection(default={"host": host, "port": 19530})
connections.connect(alias='default')
# create
c_name = cf.gen_unique_str("scale_query")
# c_name = 'scale_query_DymS7kI4'
collection_w = ApiCollectionWrapper()
collection_w.init_collection(name=c_name, schema=cf.gen_default_collection_schema(), shards_num=5)
tmp_nb = 10000
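# Keep inserting batches of tmp_nb rows from a daemon thread so the dataNode
# scaling below happens under continuous write load.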
def do_insert():
while True:
tmp_df = cf.gen_default_dataframe_data(tmp_nb)
collection_w.insert(tmp_df)
log.debug(collection_w.num_entities)
t_insert = threading.Thread(target=do_insert, args=(), daemon=True)
t_insert.start()
# scale dataNode to 5
mic.upgrade(release_name, {'spec.components.dataNode.replicas': 5}, constants.NAMESPACE)
time.sleep(300)
log.debug("Expand dataNode test finished")
# create new collection and insert
new_c_name = cf.gen_unique_str("scale_query")
collection_w_new = ApiCollectionWrapper()
collection_w_new.init_collection(name=new_c_name, schema=cf.gen_default_collection_schema(), shards_num=2)
def do_new_insert():
while True:
tmp_df = cf.gen_default_dataframe_data(tmp_nb)
collection_w_new.insert(tmp_df)
log.debug(collection_w_new.num_entities)
t_insert_new = threading.Thread(target=do_new_insert, args=(), daemon=True)
t_insert_new.start()
# scale dataNode to 3
mic.upgrade(release_name, {'spec.components.dataNode.replicas': 3}, constants.NAMESPACE)
wait_pods_ready(constants.NAMESPACE, f"app.kubernetes.io/instance={release_name}")
log.debug(collection_w.num_entities)
time.sleep(300)
log.debug("Shrink dataNode test finished")
# mic.uninstall(release_name, namespace=constants.NAMESPACE)
|
console.py
|
'''
Created on 2015/12/29
:author: hubo
'''
from __future__ import print_function
from vlcp.utils.connector import async_processor, async_to_async, Connector,\
generator_to_async
from vlcp.event.event import withIndices, Event
from vlcp.config import defaultconfig
from vlcp.server.module import Module, api, callAPI
import functools
import threading
import signal
from vlcp.event.runnable import RoutineContainer
from vlcp.event.runnable import RoutineException
import pdb
import code
from vlcp.config.config import manager
from vlcp.protocol.protocol import Protocol
from vlcp.event.connection import Client
import os
import socket
import re
from vlcp.event.core import InterruptedBySignalException
try:
from Queue import Queue, PriorityQueue
except:
from queue import Queue, PriorityQueue
import traceback
import sys
try:
import thread
except:
import _thread as thread
def console_help():
print(Console._full_help)
def restore_console():
if not hasattr(Console, '_instance') or not Console._instance:
raise ValueError('Console is not loaded')
Console._instance.restore_console()
@withIndices('type')
class ConsoleEvent(Event):
canignore = False
@withIndices()
class ConsoleServiceCall(Event):
pass
@withIndices('waiter')
class ConsoleServiceCancel(Event):
pass
@withIndices('socket')
class SocketInjectDone(Event):
pass
@withIndices()
class InterruptPoller(Event):
pass
class Waiter(object):
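# Minimal cross-thread future: the console thread blocks in wait() until the
# scheduler side calls send_result() or raise_exception().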
def __init__(self):
self.event = threading.Event()
self.event.clear()
self.exception = None
self.result = None
def wait(self, timeout = None):
self.event.wait(timeout)
if self.exception:
raise self.exception
else:
return self.result
def raise_exception(self, exc):
self.exception = exc
self.event.set()
def send_result(self, val):
self.result = val
self.event.set()
@defaultconfig
class Console(Module):
'''VLCP debugging console.
Besides the normal functions of the Python interactive console,
the following variables are provided for debugging purposes:
server, manager, container
Following functions can be used to control VLCP running:
callapi, capture, sendevent, subroutine, execute, breakpoint, syscall, resume, debug, restore_console, console_help
For details call console_help()'''
_full_help = '''
VLCP debugging console.
Besides the normal functions of the Python interactive console,
the following variables are provided for debugging purposes:
server - current running VLCP server
manager - current configuration manager
container - internal used routine container
Following functions can be used to control VLCP running:
callapi(modulename, functionname, **kwargs)
- Call module API modulename/functionname with kwargs, return result
capture(matchers, blocking = False, breakpoint = False, captureonce = False, callback = None)
- Capture events matched with specified matchers and print the event. Other parameters:
- blocking: if True, wait until the events are captured
- breakpoint: if True, suspend the event loop and wait for resume()
- captureonce: if True, remove the matchers on first capture
- callback: func(event, matcher) called on every capture if specified
sendevent(event, emerge = False)
- Send specified event to scheduler. If emerge = True, send immediately without blocking
subroutine(routine)
- create a new routine in container.
execute(routine)
- execute the routine in container, and return container.retvalue
breakpoint()
- stop running and wait for resume().
syscall(syscall_func)
- execute syscall_func in syscall context
resume()
- resume from breakpoint
debug()
- resume from breakpoint with pdb.set_trace() to enter pdb debugging. Suspend the interactive console
to work with pdb.
restore_console()
- Prepare to continue in pdb and resume the console. Type in pdb:
clear
import vlcp.service.debugging.console
vlcp.service.debugging.console.restore_console()
continue
console_help()
- show this help
'''
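# Illustrative console session (module and argument names are placeholders, not real APIs):
#   callapi('somemodule', 'somefunction', some_arg='value')  # call a module API and return its result
#   capture([ConsoleEvent.createMatcher()], blocking=True)   # block until a matching event is printed
#   resume()                                                  # continue after a breakpoint()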
service = False
_default_startinconsole = False
_default_telnetconsole = 'ptcp://localhost:9923/'
_default_key = None
_default_certificate = None
_default_ca_certs = None
def _service_routine(self):
self.apiroutine.subroutine(self._intercept_main())
csc = ConsoleServiceCall.createMatcher()
while True:
yield (csc,)
self.apiroutine.subroutine(self.apiroutine.event.routine, True)
def _service_call_routine(self, waiter, call):
try:
for m in self.apiroutine.withException(call, ConsoleServiceCancel.createMatcher(waiter)):
yield m
except RoutineException:
pass
except Exception as exc:
waiter.raise_exception(exc)
else:
waiter.send_result(self.apiroutine.retvalue)
def _intercept_main(self):
cr = self.apiroutine.currentroutine
self.sendEventQueue = Queue()
_console_connect_event = threading.Event()
_console_connect_event.clear()
for m in self.apiroutine.waitForSend(ConsoleEvent('initproxy')):
yield m
if not self.startinconsole:
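# Telnet mode: listen on the configured console address; when a client connects,
# detach the raw socket from the scheduler's poller so the blocking telnet
# threads below can own it.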
p = Protocol()
p.persist = True
p.createqueue = False
def init(connection):
sock = connection.socket
self.telnet_socket = sock
self.scheduler.unregisterPolling(connection.socket)
connection.socket = None
connection.connected = False
_console_connect_event.set()
yield (SocketInjectDone.createMatcher(sock),)
p.init = init
p.reconnect_init = init
Client(self.telnetconsole, p, self.scheduler, self.key, self.certificate, self.ca_certs).start()
def syscall_threaded_main(scheduler, processor):
# Detach self
scheduler.unregisterall(cr)
self._threaded_main_quit = False
def threaded_main():
try:
scheduler.main(False, False)
finally:
self._threaded_main_quit = True
_console_connect_event.set()
t = threading.Thread(target=threaded_main)
t.daemon = True
t.start()
try:
if self.startinconsole:
self._interactive()
else:
while not self._threaded_main_quit:
try:
while not _console_connect_event.is_set():
# There is a bug in Python 2.x that wait without timeout cannot be
# interrupted by signal
_console_connect_event.wait(3600)
if self._threaded_main_quit:
break
except InterruptedBySignalException:
# This signal should interrupt the poller, but poller is not in the main thread
# Send an event through the proxy will do the trick
self.sendEventQueue.put((InterruptPoller(),))
continue
pstdin_r, pstdin_w = os.pipe()
pstdout_r, pstdout_w = os.pipe()
orig_stdin = sys.stdin
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
pstdin = os.fdopen(pstdin_r, 'rU', 0)
pstdout = os.fdopen(pstdout_w, 'w', 0)
sys.stdin = pstdin
sys.stdout = pstdout
sys.stderr = pstdout
sock = self.telnet_socket
sock.setblocking(True)
self.telnet_socket = None
t = threading.Thread(target=self._telnet_server, args=(pstdin_w, pstdout_r, sock, orig_stdout))
t.daemon = True
t.start()
try:
self._interactive()
except SystemExit:
pass
if not t.is_alive():
break
self.sendEventQueue.put((SocketInjectDone(sock),))
finally:
try:
sock.shutdown(socket.SHUT_RDWR)
except:
pass
try:
pstdin.close()
except:
pass
try:
pstdout.close()
except:
pass
sys.stdin = orig_stdin
sys.stdout = orig_stdout
sys.stderr = orig_stderr
except SystemExit:
pass
finally:
self.sendEventQueue.put(None)
scheduler.quit()
if self.startinconsole:
print('Wait for scheduler end, this may take some time...')
t.join()
for m in self.apiroutine.syscall(syscall_threaded_main, True):
yield m
def _telnet_server_writer(self, queue, sock):
lastseq = -1
while True:
t, seq, val = queue.get()
if t < 0:
break
if t != 2 or seq >= lastseq:
try:
sock.sendall(val)
except:
break
if t == 0:
lastseq = seq
def _telnet_server_writer2(self, pstdout_r, queue, lock, orig_stdout):
while True:
data = os.read(pstdout_r, 1024)
if not data:
os.close(pstdout_r)
break
data, _ = re.subn(br'\r?\n', b'\r\n', data)
lock.acquire()
try:
self._telnet_seq += 1
seq = self._telnet_seq
finally:
lock.release()
queue.put((2, seq, data))
def _telnet_server(self, pstdin_w, pstdout_r, sock, orig_stdout):
queue = PriorityQueue()
inputbuffer = b''
self._telnet_seq = 0
try:
t = threading.Thread(target=self._telnet_server_writer, args=(queue, sock))
t.daemon = True
t.start()
lock = threading.Lock()
def writeall(data):
start = 0
while start < len(data):
size = os.write(pstdin_w, data[start:])
start += size
def sendcontrol(t, data):
lock.acquire()
try:
self._telnet_seq += 1
seq = self._telnet_seq
finally:
lock.release()
queue.put((t, seq, data))
t2 = threading.Thread(target=self._telnet_server_writer2, args=(pstdout_r, queue, lock, orig_stdout))
t2.daemon = True
t2.start()
escaping = False
option = None
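# Telnet IAC handling below (standard telnet constants): 0xFF = IAC, 0xFB/0xFC = WILL/WON'T,
# 0xFD/0xFE = DO/DON'T, option 0x06 = TIMING-MARK, 0xF3/0xF4 = BREAK / INTERRUPT PROCESS.
# The loop answers "WILL TIMING-MARK" to "DO TIMING-MARK", refuses every other negotiation,
# and maps 0x03 (Ctrl-C), BREAK and INTERRUPT PROCESS to a KeyboardInterrupt in the main thread.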
while True:
newdata = sock.recv(1024)
if newdata == b'':
break
for i in range(0, len(newdata)):
c = newdata[i:i+1]
if escaping:
if option == b'\xfd' and c == b'\x06':
sendcontrol(1, b'\xff\xfb\x06')
option = None
escaping = False
elif option == b'\xfd' or option == b'\xfe':
sendcontrol(1, b'\xff\xfc' + c)
option = None
escaping = False
elif option == b'\xfb' or option == b'\xfc':
sendcontrol(1, b'\xff\xfe' + c)
option = None
escaping = False
elif c in (b'\xfb', b'\xfc', b'\xfd', b'\xfe'):
option = c
else:
option = None
if c == b'\xf3' or c == b'\xf4':
thread.interrupt_main()
escaping = False
else:
if c == b'\x03':
thread.interrupt_main()
elif c == b'\x08':
inputbuffer = inputbuffer[:-1]
elif c == b'\x00':
inputbuffer += b'\n'
writeall(inputbuffer)
inputbuffer = b''
elif c == b'\r' or c == b'\n':
inputbuffer += c
writeall(inputbuffer)
inputbuffer = b''
elif c == b'\xff':
escaping = True
else:
inputbuffer += c
except OSError:
pass
except IOError:
pass
finally:
try:
os.close(pstdin_w)
except:
pass
queue.put((-1, -1, -1))
def _interactive(self):
lsignal = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
_breakpoint_event = threading.Event()
_current_thread = threading.current_thread().ident
_enter_pdb = [False]
def _async_run(call):
self.sendEventQueue.put((ConsoleServiceCall(routine = call),))
def _async(func):
@functools.wraps(func)
def f(*args, **kwargs):
_async_run(func(*args, **kwargs))
return f
def _service_call_customized(factory):
waiter = Waiter()
self.sendEventQueue.put((ConsoleServiceCall(routine=factory(waiter)),))
try:
return waiter.wait()
except:
self.sendEventQueue.put((ConsoleServiceCancel(waiter),))
raise
def execute(call):
return _service_call_customized(lambda waiter: self._service_call_routine(waiter, call))
def _service(func):
@functools.wraps(func)
def f(*args, **kwargs):
return execute(func(*args, **kwargs))
return f
@_service
def callapi(modulename, functionname, **kwargs):
return callAPI(self.apiroutine, modulename, functionname, kwargs)
@_service
def sendevent(event, emerge = False):
if emerge:
self.apiroutine.scheduler.emergesend(event)
else:
for m in self.apiroutine.waitForSend(event):
yield m
self.apiroutine.retvalue = None
@_service
def subroutine(routine):
self.apiroutine.retvalue = self.apiroutine.subroutine(routine)
if False:
yield
@_service
def syscall(syscall_func):
for m in self.apiroutine.syscall(syscall_func):
yield m
def breakpoint():
in_thread = threading.current_thread().ident
if in_thread == _current_thread:
_breakpoint()
else:
print('Enter VLCP debugging breakpoint:')
traceback.print_stack()
print('Call resume() to continue the event loop, or debug() to enter pdb')
_breakpoint_event.clear()
_breakpoint_event.wait()
if _enter_pdb[0]:
pdb.set_trace()
else:
print('Resume from breakpoint.')
@_async
def _breakpoint():
breakpoint()
if False:
yield
def resume():
_enter_pdb[0] = False
_breakpoint_event.set()
@_async
def restore_console():
self._restore_console_event.set()
if False:
yield
self.restore_console = restore_console
def debug():
_enter_pdb[0] = True
self._restore_console_event.clear()
_breakpoint_event.set()
# Switch to event loop thread, suspend the main thread, wait for restore_console
self._restore_console_event.wait()
_capture_breakpoint = breakpoint
def capture(matchers, blocking = False, breakpoint = False, captureonce = False, callback = None):
def _capture_service(waiter):
if blocking:
csm = ConsoleServiceCancel.createMatcher(waiter)
else:
waiter.send_result(self.apiroutine.currentroutine)
firsttime = True
while firsttime or not captureonce:
if blocking:
yield tuple(matchers) + (csm,)
else:
yield matchers
if blocking and self.apiroutine.matcher is csm:
# Cancelled
return
print('Event Captured: Capture %r with %r' % (self.apiroutine.event, self.apiroutine.matcher))
if firsttime and blocking:
waiter.send_result((self.apiroutine.event, self.apiroutine.matcher, self.apiroutine.currentroutine))
firsttime = False
if callback:
try:
callback(self.apiroutine.event, self.apiroutine.matcher)
except:
print('Exception while running callback:')
traceback.print_exc()
if breakpoint:
_capture_breakpoint()
return _service_call_customized(_capture_service)
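# Illustrative calls (matchers and callbacks are placeholders):
#   capture([ConsoleEvent.createMatcher()], callback=lambda ev, m: print(ev))   # non-blocking, report each match
#   capture([ConsoleEvent.createMatcher()], blocking=True, captureonce=True)    # wait for the first match only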
code.interact(self.__doc__ + '\n' + 'Python ' + str(sys.version) + ' on ' + str(sys.platform),
None,
{'server':self.server,'manager':manager, 'container':self.apiroutine,
'callapi':callapi, 'capture':capture, 'sendevent':sendevent,
'subroutine':subroutine, 'breakpoint':breakpoint, 'syscall':syscall,
'resume':resume, 'debug':debug, 'restore_console':restore_console,
'console_help':console_help,'execute':execute})
finally:
signal.signal(signal.SIGINT, lsignal)
def __init__(self, server):
'''
Constructor
'''
Module.__init__(self, server)
self._ce_matcher = ConsoleEvent.createMatcher()
self.apiroutine = RoutineContainer(self.scheduler)
self.apiroutine.main = self._service_routine
self._restore_console_event = threading.Event()
@generator_to_async(True, False)
def proxy(event, matcher):
while True:
events = self.sendEventQueue.get()
if events is None:
break
yield events
@async_to_async(True, False)
@async_processor
def processor(event, matcher, queueout):
if event.type == 'initproxy':
proxy(event, matcher, queueout)
self.connector = Connector(processor, (self._ce_matcher,), self.scheduler, False)
self.routines.append(self.apiroutine)
self.routines.append(self.connector)
if __name__ == '__main__':
from vlcp.server import main
manager['module.console.startinconsole'] = True
main(None, ())
|
test_cli.py
|
from __future__ import absolute_import, print_function, division
import os
import sys
import signal
import tarfile
import time
from threading import Thread
import pytest
import conda_pack
from conda_pack.cli import main
from conda_pack.compat import on_win
from .conftest import py36_path, py27_path
on_p2 = sys.version[0] == '2'
def test_help(capsys):
with pytest.raises(SystemExit) as exc:
main(["-h"])
assert exc.value.code == 0
out, err = capsys.readouterr()
assert not err
assert 'usage: conda-pack' in out
def test_version(capsys):
with pytest.raises(SystemExit) as exc:
main(["--version"])
assert exc.value.code == 0
out, err = capsys.readouterr()
assert not err
assert conda_pack.__version__ in out
def test_parse_include_exclude():
out = {}
def capture(**kwargs):
out.update(kwargs)
with pytest.raises(SystemExit) as exc:
main(["--exclude", "foo/*",
"--include", "*.py",
"--include", "*.pyx",
"--exclude", "foo/bar/*.pyx"],
pack=capture)
assert exc.value.code == 0
assert out['filters'] == [("exclude", "foo/*"),
("include", "*.py"),
("include", "*.pyx"),
("exclude", "foo/bar/*.pyx")]
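# Equivalent command line for the filters above (illustrative):
#   conda-pack --exclude 'foo/*' --include '*.py' --include '*.pyx' --exclude 'foo/bar/*.pyx'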
def test_cli_roundtrip(capsys, tmpdir):
out_path = os.path.join(str(tmpdir), 'py36.tar')
with pytest.raises(SystemExit) as exc:
main(["-p", py36_path, "-o", out_path])
assert exc.value.code == 0
assert os.path.exists(out_path)
assert tarfile.is_tarfile(out_path)
out, err = capsys.readouterr()
assert not err
bar, percent, time = [i.strip() for i in out.split('\r')[-1].split('|')]
assert bar == '[' + '#' * 40 + ']'
assert percent == '100% Completed'
assert time
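# The final progress line parsed above looks roughly like (illustrative):
#   [########################################] | 100% Completed |  2.6s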
def test_quiet(capsys, tmpdir):
out_path = os.path.join(str(tmpdir), 'py36.tar')
with pytest.raises(SystemExit) as exc:
main(["-p", py36_path, "-o", out_path, "-q"])
assert exc.value.code == 0
assert os.path.exists(out_path)
assert tarfile.is_tarfile(out_path)
out, err = capsys.readouterr()
assert not err
assert not out
def test_cli_exceptions(capsys):
with pytest.raises(SystemExit) as exc:
main(["-p", "not_a_real_path"])
assert exc.value.code == 1
out, err = capsys.readouterr()
assert "CondaPackError: Environment path" in err
with pytest.raises(SystemExit) as exc:
main(["-foo", "-bar"])
assert exc.value.code != 0
out, err = capsys.readouterr()
assert not out
assert "usage: conda-pack" in err
@pytest.mark.xfail(on_p2, reason='Relaxing python 2 tests on CI')
def test_cli_warnings(capsys, tmpdir):
out_path = os.path.join(str(tmpdir), 'py27.tar')
with pytest.raises(SystemExit) as exc:
main(["-p", py27_path, "-o", out_path])
# Test fails in some CI systems for Python 2
assert exc.value.code == 0
assert os.path.exists(out_path)
assert tarfile.is_tarfile(out_path)
out, err = capsys.readouterr()
assert "Conda-managed packages were found" in err
assert "UserWarning" not in err # printed, not from python warning
@pytest.mark.skipif(on_win, reason='SIGINT terminates the tests on Windows')
@pytest.mark.xfail(on_p2, reason='Relaxing python 2 tests on CI')
def test_keyboard_interrupt(capsys, tmpdir):
def interrupt():
time.sleep(0.2)
os.kill(os.getpid(), signal.SIGINT)
interrupter = Thread(target=interrupt)
out_path = os.path.join(str(tmpdir), 'py36.tar')
try:
with pytest.raises(SystemExit) as exc:
interrupter.start()
main(["-p", py36_path, "-o", out_path])
except KeyboardInterrupt:
assert False, "Should have been caught by the CLI"
assert exc.value.code == 1
out, err = capsys.readouterr()
assert err == 'Interrupted\n'
assert not os.path.exists(out_path)
|
send_newsletter_continuous.py
|
"""Command for sending the newsletter"""
from threading import Thread
import signal
import sys
from django.conf import settings
from django.utils.translation import activate
from django.core import signals
from django.core.management.base import NoArgsCommand
from emencia.django.newsletter.mailer import SMTPMailer
from emencia.django.newsletter.models import SMTPServer
class Command(NoArgsCommand):
"""Send the newsletter in queue"""
help = 'Send the newsletter in queue'
def handle_noargs(self, **options):
verbose = int(options['verbosity'])
if verbose:
print 'Starting sending newsletters...'
activate(settings.LANGUAGE_CODE)
senders = SMTPServer.objects.all()
workers = []
for sender in senders:
worker = SMTPMailer(sender, verbose=verbose)
thread = Thread(target=worker.run, name=sender.name)
workers.append((worker, thread))
handler = term_handler(workers)
for s in [signal.SIGTERM, signal.SIGINT]:
signal.signal(s, handler)
# first close current connection
signals.request_finished.send(sender=self.__class__)
for worker, thread in workers:
thread.start()
signal.pause() # wait for sigterm
for worker, thread in workers:
if thread.is_alive():
thread.join()
sys.exit(0)
def term_handler(workers):
def handler(signum, frame):
for worker, thread in workers:
worker.stop_event.set()
return handler
|
btlejack.py
|
from threading import Lock
from queue import Queue
import time
from serial.tools.list_ports import comports
from serial import Serial,SerialException
from mirage.libs.ble_utils.constants import *
from mirage.libs.ble_utils.scapy_btlejack_layers import *
from mirage.libs import io,utils,wireless
class BTLEJackDevice(wireless.Device):
'''
This device allows communication with a BTLEJack device in order to sniff the Bluetooth Low Energy protocol.
The corresponding interfaces are : ``microbitX`` (e.g. "microbit0")
The following capabilities are currently supported :
+-----------------------------------+----------------+
| Capability | Available ? |
+===================================+================+
| SCANNING | yes |
+-----------------------------------+----------------+
| ADVERTISING | no |
+-----------------------------------+----------------+
| SNIFFING_ADVERTISEMENTS | yes |
+-----------------------------------+----------------+
| SNIFFING_NEW_CONNECTION | yes |
+-----------------------------------+----------------+
| SNIFFING_EXISTING_CONNECTION | yes |
+-----------------------------------+----------------+
| JAMMING_CONNECTIONS | yes |
+-----------------------------------+----------------+
| JAMMING_ADVERTISEMENTS | yes |
+-----------------------------------+----------------+
| HIJACKING_CONNECTIONS | yes |
+-----------------------------------+----------------+
| INITIATING_CONNECTION | no |
+-----------------------------------+----------------+
| RECEIVING_CONNECTION | no |
+-----------------------------------+----------------+
| COMMUNICATING_AS_MASTER | yes |
+-----------------------------------+----------------+
| COMMUNICATING_AS_SLAVE | no |
+-----------------------------------+----------------+
| HCI_MONITORING | no |
+-----------------------------------+----------------+
'''
sharedMethods = [
"getFirmwareVersion",
"getDeviceIndex",
"setCRCChecking",
"setChannel",
"getChannel",
"sniffNewConnections",
"sniffExistingConnections",
"sniffAdvertisements",
"jamAdvertisements",
"disableAdvertisementsJamming",
"setSweepingMode",
"setScan",
"setScanInterval",
"getConnections",
"switchConnection",
"getCurrentConnection",
"isConnected",
"isSynchronized",
"getCurrentHandle",
"getAccessAddress",
"getCrcInit",
"getChannelMap",
"getHopInterval",
"getHopIncrement",
"setJamming",
"setHijacking"
]
def setJamming(self,enable=True):
'''
This method enables or disables the jamming mode.
:param enable: boolean indicating if the jamming mode must be enabled or disabled
:type enable: bool
:Example:
>>> device.setJamming(enable=True) # jamming mode enabled
>>> device.setJamming(enable=False) # jamming mode disabled
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.jamming = enable
def setHijacking(self,enable=True):
'''
This method enables or disables the hijacking mode.
:param enable: boolean indicating if the hijacking mode must be enabled or disabled
:type enable: bool
:Example:
>>> device.setHijacking(enable=True) # hijacking mode enabled
>>> device.setHijacking(enable=False) # hijacking mode disabled
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.hijacking = enable
def getCurrentHandle(self):
'''
This method returns the connection Handle currently in use.
If no connection is established, its value is equal to -1.
:return: connection Handle
:rtype: int
.. warning::
This method always returns 1; it exists to provide the same API as the HCI Device.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return 1
def getConnections(self):
'''
This method returns a list of pairs (connection handle / access address) representing the connections currently established.
A connection is described by a dictionary containing a handle and an access address : ``{"handle":1, "address":"0x12345678"}``
:return: list of connections established
:rtype: list of dict
:Example:
>>> device.getConnections()
[{'handle':1, 'address':'0x12345678'}]
.. warning::
The connection handle is always 1; it exists to provide the same API as the HCI Device.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return [{"address":"0x{:08x}".format(self.accessAddress),"handle":1}]
def getCurrentConnection(self):
'''
This method returns the access address associated with the current connection. If no connection is established, it returns None.
:return: access address of the current connection
:rtype: str
:Example:
>>> device.getCurrentConnection()
'0x12345678'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return "0x{:08x}".format(self.accessAddress)
def switchConnection(self,address):
'''
This method is provided in order to expose the same API as an HCI Device; it has no effect.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
io.fail("Switching connection not allowed with BTLEJack Device !")
def isConnected(self):
'''
This method returns a boolean indicating if a connection is currently established and hijacked.
:return: boolean indicating if a connection is established and hijacked
:rtype: bool
:Example:
>>> device.isConnected()
True
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.hijacked
def isSynchronized(self):
'''
This method indicates if the sniffer is currently synchronized with a connection.
:return: boolean indicating if the sniffer is synchronized
:rtype: bool
:Example:
>>> device.isSynchronized()
True
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.synchronized
@classmethod
def findMicrobits(cls,index=None):
'''
This class method finds a specific BTLEJack device by providing the device's index.
If no index is provided, it returns a list of every device found.
If no device has been found, None is returned.
:param index: device's index
:type index: int
:return: string indicating the device
:rtype: str
:Example:
>>> BTLEJackDevice.findMicrobits(0)
'/dev/ttyACM0'
>>> BTLEJackDevice.findMicrobits()
['/dev/ttyACM0','/dev/ttyACM1']
'''
microbitList = [i[0] for i in comports() if
(isinstance(i,tuple) and "VID:PID=0d28:0204" in i[-1]) or
(i.vid == 0x0D28 and i.pid == 0x0204)
]
if index is None:
return microbitList
else:
try:
microbit = microbitList[index]
except IndexError:
return None
return microbit
return None
def __init__(self,interface):
super().__init__(interface=interface)
customPort = None
if "microbit" == interface:
self.index = 0
self.interface = "microbit0"
elif "microbit" == interface[:8]:
if ":" in interface:
fields = interface.split(":")
customPort = fields[1]
self.index = customPort
else:
self.index = int(interface.split("microbit")[1])
self.interface = interface
if not customPort:
self.microbit = BTLEJackDevice.findMicrobits(self.index)
else:
self.microbit = customPort
if self.microbit is not None:
try:
self.microbit = Serial(port = self.microbit, baudrate=115200, timeout=0)
self.ready = False
self._flush()
except SerialException:
io.fail("Serial communication not ready !")
self.ready = False
self.microbit = None
else:
io.fail("No btlejack device found !")
self.ready = False
def _enterListening(self):
self.isListening = True
def _exitListening(self):
self.isListening = False
def _isListening(self):
return self.isListening
def _cancelFollow(self): # TODO
pass
def setChannel(self,channel=37):
'''
This method changes the channel currently in use to the provided channel.
:param channel: new channel
:type channel: int
:Example:
>>> device.getChannel()
37
>>> device.setChannel(channel=38)
>>> device.getChannel()
38
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.channel = channel
def getChannel(self):
'''
This method returns the channel currently in use.
:return: channel in use
:rtype: int
:Example:
>>> device.getChannel()
37
>>> device.setChannel(channel=38)
>>> device.getChannel()
38
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.channel
def _flush(self):
while self.microbit.in_waiting:
self.microbit.read()
def _flushCommandResponses(self):
while not self.commandResponses.empty():
self.commandResponses.get()
def _internalCommand(self,cmd,noResponse=False):
packet = BTLEJack_Hdr()/cmd
self._flushCommandResponses()
def getFunction():
if not self._isListening() or self.commandResponses.empty():
func = self._recv
else:
func = self.commandResponses.get
return func
self._send(packet)
if not noResponse:
getResponse = getFunction()
response = getResponse()
while response is None or response.packet_type == 4 or response.opcode != packet.opcode:
getResponse = getFunction()
response = getResponse()
return response
def _getFirmwareVersion(self):
pkt = self._internalCommand(BTLEJack_Version_Command())
return (pkt.major,pkt.minor)
def _reset(self):
self._internalCommand(BTLEJack_Reset_Command())
def getFirmwareVersion(self):
'''
This method returns the firmware version of the current BTLEJack device.
:return: firmware version as a tuple of (major, minor)
:rtype: tuple of (int,int)
:Example:
>>> device.getFirmwareVersion()
(3,14)
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
version = self._getFirmwareVersion()
return version
def getDeviceIndex(self):
'''
This method returns the index of the current BTLEJack device.
:return: device's index
:rtype: int
:Example:
>>> device.getDeviceIndex()
0
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.index
def _send(self,packet):
self.lock.acquire()
self.microbit.write(raw(packet))
self.lock.release()
def send(self,packet):
command = None
if BTLE_DATA in packet:
command = BTLEJack_Hdr()/BTLEJack_Send_Packet_Command(ble_payload=packet[BTLE_DATA:])
if self.isConnected() and CtrlPDU in command.ble_payload and command.ble_payload.optcode == 0x02:
self.hijacked = False
if command is not None :
self._send(raw(command))
# New Connection Sniffing methods
def sniffNewConnections(self,address="FF:FF:FF:FF:FF:FF",channel=None):
'''
This method starts the new connections sniffing mode.
:param address: selected address - if not provided, no filter is applied (format : "1A:2B:3C:4D:5E:6F")
:type address: str
:param channel: selected channel - if not provided, channel 37 is selected
:type channel: int
:Example:
>>> device.sniffNewConnections()
>>> device.sniffNewConnections(channel=38)
>>> device.sniffNewConnections(address="1A:2B:3C:4D:5E:6F")
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.synchronized = False
self.hijacked = False
self.sniffingMode = BLESniffingMode.NEW_CONNECTION
self.lastTarget = address
self._sniffConnectionRequests(address=address,channel=channel)
def _sniffConnectionRequests(self,address='FF:FF:FF:FF:FF:FF',channel=None):
if channel is not None and not self.sweepingMode:
self.setChannel(channel)
self._internalCommand(BTLEJack_Sniff_Connection_Request_Command(address=address,channel=self.getChannel() if
channel is None else channel))
# Existing Connection Sniffing methods
def sniffExistingConnections(self,accessAddress=None,crcInit=None,channelMap=None):
'''
This method starts the existing connections sniffing mode.
:param accessAddress: selected Access Address - if not provided, the parameter is recovered
:type address: int
:param crcInit: selected CRCInit - if not provided, the parameter is recovered
:type crcInit: int
:param channelMap: selected Channel Map - if not provided, the parameter is recovered
:type channelMap: int
:Example:
>>> device.sniffExistingConnections()
>>> device.sniffExistingConnections(accessAddress=0xe5e296e9)
>>> device.sniffExistingConnections(accessAddress=0xe5e296e9, crcInit=0x0bd54a)
>>> device.sniffExistingConnections(accessAddress=0xe5e296e9, crcInit=0x0bd54a, channelMap=0x1fffffffff)
.. warning::
If no access address is provided, BTLEJack collects multiple candidate access addresses and selects the most probable one.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.hijacked = False
self.synchronized = False
self.sniffingMode = BLESniffingMode.EXISTING_CONNECTION
if accessAddress is None:
self._listAccessAddress()
else:
self._setAccessAddress(accessAddress)
if crcInit is None:
self._recoverFromAccessAddress(accessAddress)
else:
self._setCrcInit(crcInit)
if channelMap is None:
self._recoverFromCrcInit(accessAddress,crcInit)
else:
self._setChannelMap(channelMap)
self._recoverFromChannelMap(accessAddress,crcInit, channelMap)
def _resetFilteringPolicy(self,policyType="blacklist"):
policy = 0x00 if policyType == "blacklist" else 0x01
self._internalCommand(BTLEJack_Advertisements_Command()/BTLEJack_Advertisements_Reset_Policy_Command(policy_type=policy))
def _addFilteringRule(self,pattern=b"",mask=None,position=None):
if position is None:
position = 0xFF
if mask is None:
mask = len(pattern) * b"\xFF"
self._internalCommand(BTLEJack_Advertisements_Command()/BTLEJack_Advertisements_Add_Rule_Command()/BTLEJack_Filtering_Rule(data=pattern,mask=mask,position=position))
def sniffAdvertisements(self,address='FF:FF:FF:FF:FF:FF',channel=None):
'''
This method starts the advertisement sniffing mode.
:param address: selected address - if not provided, no filter is applied (format : "1A:2B:3C:4D:5E:6F")
:type address: str
:param channel: selected channel - if not provided, channel 37 is selected
:type channel: int
:Example:
>>> device.sniffAdvertisements()
>>> device.sniffAdvertisements(channel=38)
>>> device.sniffAdvertisements(address="1A:2B:3C:4D:5E:6F")
.. warning::
This method requires the custom Mirage Firmware in order to sniff advertisements.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if self.customMirageFirmware:
self.sniffingMode = BLESniffingMode.ADVERTISEMENT
self.synchronized = False
self.hijacked = False
if channel is not None and not self.sweepingMode:
self.setChannel(channel)
if address.upper() == "FF:FF:FF:FF:FF:FF":
self._resetFilteringPolicy("blacklist")
else:
self._resetFilteringPolicy("whitelist")
target = bytes.fromhex(address.replace(":",""))[::-1]
self._addFilteringRule(pattern=target,position=2)
self._internalCommand(BTLEJack_Advertisements_Command()/BTLEJack_Advertisements_Disable_Sniff_Command())
self._internalCommand(BTLEJack_Advertisements_Command()/BTLEJack_Advertisements_Enable_Sniff_Command(channel=self.getChannel() if channel is None else channel))
else:
io.fail("Sniffing advertisements is not supported by the stock BTLEJack firmware,"
" a custom Mirage firmware is required.")
def jamAdvertisements(self,pattern=b"",offset=0,channel=37):
'''
This method reactively jams advertisements according to the specified pattern, offset and channel provided.
:param pattern: pattern contained in payload indicating that the packet must be jammed
:type pattern: bytes
:param offset: offset indicating the position of pattern in the payload
:type offset: int
:param channel: selected channel - if not provided, channel 37 is selected
:type channel: int
:Example:
>>> target = "1A:2B:3C:4D:5E:6F"
>>> pattern = bytes.fromhex(target.replace(":",""))[::-1]
>>> device.jamAdvertisements(pattern=pattern,offset=2,channel=39) # jam the advertisements transmitted by 1A:2B:3C:4D:5E:6F on channel 39
.. warning::
This method requires the custom Mirage Firmware in order to jam advertisements.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if self.customMirageFirmware:
self.synchronized = False
self.hijacked = False
self.jammingEnabled = True
if channel is not None:
self.setChannel(channel)
self._internalCommand(BTLEJack_Advertisements_Command()/BTLEJack_Advertisements_Enable_Jamming_Command(
offset=offset,
pattern=pattern,
channel=self.getChannel() if
channel is None else channel))
else:
io.fail("Jamming advertisements is not supported by the stock BTLEJack firmware,"
" a custom Mirage firmware is required.")
def _listAccessAddress(self):
io.info("Recovering access address ...")
self._internalCommand(BTLEJack_Scan_Connections_Command())
def _setAccessAddress(self,accessAddress=None):
self.accessAddress = accessAddress
def _setCrcInit(self,crcInit=None):
self.crcInit = crcInit
def _setChannelMap(self,channelMap=None):
self.channelMap = channelMap
def _setHopInterval(self,hopInterval=None):
self.hopInterval = hopInterval
def _getHopInterval(self):
return self.hopInterval
def _setHopIncrement(self,hopIncrement):
self.hopIncrement = hopIncrement
def _getHopIncrement(self):
return self.hopIncrement
def _getChannelMap(self):
return self.channelMap
def _getAccessAddress(self):
return self.accessAddress
def _getCrcInit(self):
return self.crcInit
def _updateCrcInit(self,crcInit=None):
io.success("CRCInit successfully recovered : "+"0x{:06x}".format(crcInit))
self._setCrcInit(crcInit)
self._recoverFromCrcInit()
def _updateChannelMap(self,channelMap=None):
io.success("Channel Map successfully recovered : "+"0x{:10x}".format(channelMap))
self._setChannelMap(channelMap)
self._recoverFromChannelMap()
def _updateHopInterval(self,hopInterval=None):
io.success("Hop Interval successfully recovered : "+str(hopInterval))
self._setHopInterval(hopInterval)
io.info("Recovering Hop Increment ...")
def _updateHopIncrement(self,hopIncrement=None):
io.success("Hop Increment successfully recovered : "+str(hopIncrement))
self._setHopIncrement(hopIncrement)
io.info("All parameters recovered, following connection ...")
def _recoverFromAccessAddress(self,accessAddress):
aa = accessAddress if accessAddress is not None else self._getAccessAddress()
io.info("Recovering CRCInit ...")
self._reset()
pkt = self._internalCommand(BTLEJack_Recover_Command()/BTLEJack_Recover_Connection_AA_Command(access_address=aa))
def _recoverFromCrcInit(self,accessAddress = None,crcInit = None):
aa = accessAddress if accessAddress is not None else self._getAccessAddress()
crcInit = crcInit if crcInit is not None else self._getCrcInit()
io.info("Recovering ChannelMap ...")
self._reset()
pkt = self._internalCommand(BTLEJack_Recover_Command()/BTLEJack_Recover_Channel_Map_Command(access_address=aa,crc_init=crcInit))
io.progress(0, total=36,suffix="0/36 channels")
def _recoverFromChannelMap(self,accessAddress = None,crcInit = None,channelMap=None):
aa = accessAddress if accessAddress is not None else self._getAccessAddress()
crcInit = crcInit if crcInit is not None else self._getCrcInit()
channelMap = channelMap if channelMap is not None else self._getChannelMap()
io.info("Recovering Hop Interval ...")
self._reset()
pkt = self._internalCommand(BTLEJack_Recover_Command()/BTLEJack_Recover_Hopping_Parameters_Command(access_address=aa,crc_init=crcInit,channel_map=channelMap))
def _addCandidateAccessAddress(self,accessAddress=None,rssi=None,channel=None):
io.info("Candidate access address found : "+"0x{:08x}".format(accessAddress)+" (rssi = -"+str(rssi)+"dBm / channel = "+str(channel)+")")
if accessAddress not in self.candidateAccessAddresses:
self.candidateAccessAddresses[accessAddress] = {"hits":1,"rssi":rssi,"channels":set([channel])}
else:
self.candidateAccessAddresses[accessAddress]["hits"] += 1
self.candidateAccessAddresses[accessAddress]["channels"].add(channel)
if self.candidateAccessAddresses[accessAddress]["hits"] >= 5:
io.success("Access Address selected : "+"0x{:08x}".format(accessAddress))
self._setAccessAddress(accessAddress)
self._recoverFromAccessAddress(accessAddress=accessAddress)
def _recv(self):
self.lock.acquire()
if self.microbit is not None and self.microbit.in_waiting:
self.receptionBuffer += self.microbit.read()
self.lock.release()
if len(self.receptionBuffer) > 0:
try:
start = self.receptionBuffer.index(0xBC)
self.receptionBuffer = self.receptionBuffer[start:]
except ValueError:
self.receptionBuffer = b""
if len(self.receptionBuffer) >= 4:
size = struct.unpack('<H',self.receptionBuffer[2:4])[0]
if len(self.receptionBuffer) >= size + 5:
#print(self.receptionBuffer[:size+5].hex())
pkt = BTLEJack_Hdr(self.receptionBuffer[:size+5])
self.receptionBuffer = self.receptionBuffer[size+5:]
return pkt
else:
pass # incomplete frame : keep the buffer and wait for the remaining bytes
return None
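# Frame layout assumed by _recv(), inferred from the parsing above (field meanings beyond the
# length are presumed): byte 0 = 0xBC magic, byte 1 = packet type, bytes 2-3 = little-endian
# payload length ``size``, followed by ``size`` payload bytes and one trailing byte, i.e. a
# complete frame is ``size + 5`` bytes. Anything before the first 0xBC is discarded as noise.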
def disableAdvertisementsJamming(self):
if self.jammingEnabled:
self._internalCommand(BTLEJack_Advertisements_Command()/BTLEJack_Advertisements_Disable_Jamming_Command())
def close(self):
self.lock.acquire()
self._stopSweepingThread()
self.microbit.close()
self.microbit = None
self.lock.release()
def isUp(self):
return self.microbit is not None
def setCRCChecking(self,enable=True):
'''
This method enables CRC Checking.
:param enable: boolean indicating if CRC Checking must be enabled
:type enable: bool
:Example:
>>> device.setCRCChecking(enable=True) # CRC Checking enabled
>>> device.setCRCChecking(enable=False) # CRC Checking disabled
.. warning::
BTLEJack calculates the CRC directly in the firmware, so this command is ignored. It is present in order to provide a similar API to Ubertooth.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.crcEnabled = enable
def setAccessAddress(self,accessAddress):
'''
This method sets the access address to use.
:param accessAddress: new access address
:type accessAddress: int
:Example:
>>> device.setAccessAddress(0xe5e296e9)
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.accessAddress = accessAddress
def getAccessAddress(self):
'''
This method returns the access address currently in use.
:return: access address
:rtype: int
:Example:
>>> hex(device.getAccessAddress())
'0xe5e296e9'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.accessAddress
def getCrcInit(self):
'''
This method returns the CRCInit currently in use.
:return: CRCInit
:rtype: int
:Example:
>>> hex(device.getCrcInit())
'0x0bd54a'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.crcInit
def getChannelMap(self):
'''
This method returns the Channel Map currently in use.
:return: Channel Map
:rtype: int
:Example:
>>> hex(device.getChannelMap())
'0x1fffffffff'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.channelMap
def getHopInterval(self):
'''
This method returns the Hop Interval currently in use.
:return: Hop Interval
:rtype: int
:Example:
>>> device.getHopInterval()
36
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.hopInterval
def getHopIncrement(self):
'''
This method returns the Hop Increment currently in use.
:return: Hop Increment
:rtype: int
:Example:
>>> device.getHopIncrement()
11
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.hopIncrement
def restartSniffingMode(self):
'''
This method restarts the sniffing mode.
:Example:
>>> device.restartSniffingMode()
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if self.sniffingMode == BLESniffingMode.NEW_CONNECTION:
self.sniffNewConnections()
else:
self.sniffExistingConnections()
def recv(self):
self._enterListening()
pkt = self._recv()
self._exitListening()
if pkt is not None:
if self.customMirageFirmware and BTLEJack_Advertisement_Packet_Notification in pkt:
timestamp = time.time()
ts_sec = int(timestamp)
ts_usec = int((timestamp - ts_sec)*1000000)
'''
if pkt.crc_ok == 0x01:
io.success("CRC OK !")
else:
io.fail("CRC not OK !")
'''
if pkt.crc_ok != 0x01 and self.crcEnabled:
return None
return BTLE_PPI(
btle_channel=pkt.channel,
btle_clkn_high=ts_sec,
btle_clk_100ns=ts_usec,
rssi_max=-pkt.rssi,
rssi_min=-pkt.rssi,
rssi_avg=-pkt.rssi,
rssi_count=1)/BTLE()/BTLE_ADV(pkt.ble_payload)
if BTLEJack_Access_Address_Notification in pkt:
self._addCandidateAccessAddress(accessAddress=pkt.access_address,
rssi=pkt.rssi,
channel=pkt.channel)
if BTLEJack_CRCInit_Notification in pkt:
self._updateCrcInit(crcInit=pkt.crc_init)
if BTLEJack_Channel_Map_Notification in pkt:
self._updateChannelMap(channelMap=pkt.channel_map)
if BTLEJack_Verbose_Response in pkt and b"c=" in pkt.message:
currentChannel = pkt.message.decode('ascii').split("c=")[1]
io.progress(int(currentChannel), total=36,suffix=str(currentChannel)+"/36 channels")
if BTLEJack_Verbose_Response in pkt and b"ADV_JAMMED" in pkt.message:
io.info("Advertisement jammed on channel #"+str(self.getChannel()))
if BTLEJack_Verbose_Response in pkt:
io.info(pkt.message.decode('ascii'))
if BTLEJack_Hop_Interval_Notification in pkt:
self._updateHopInterval(pkt.hop_interval)
if BTLEJack_Hop_Increment_Notification in pkt:
self._updateHopIncrement(pkt.hop_increment)
if self.hijacking:
self._internalCommand(BTLEJack_Enable_Hijacking_Command(enabled=0x01))
elif self.jamming:
self._internalCommand(BTLEJack_Enable_Jamming_Command(enabled=0x01))
self.synchronized = True
if BTLEJack_Hijack_Status_Notification in pkt:
self.hijacked = (pkt.status == 0x00)
if BTLEJack_Nordic_Tap_Packet_Notification in pkt:
timestamp = time.time()
ts_sec = int(timestamp)
ts_usec = int((timestamp - ts_sec)*1000000)
return BTLE_PPI(
btle_channel=pkt.channel,
btle_clkn_high=ts_sec,
btle_clk_100ns=ts_usec,
rssi_max=pkt.rssi,
rssi_min=pkt.rssi,
rssi_avg=pkt.rssi,
rssi_count=1)/BTLE(access_addr=self.getAccessAddress())/pkt.ble_payload
elif BTLEJack_Connection_Request_Notification in pkt:
self._setAccessAddress(struct.unpack(">I",struct.pack("<I",pkt.ble_payload.AA))[0])
self._setCrcInit(struct.unpack(">I",b"\x00" + struct.pack('<I',pkt.ble_payload.crc_init)[:3])[0])
self._setChannelMap(pkt.ble_payload.chM)
self._setHopInterval(pkt.ble_payload.interval)
self._setHopIncrement(pkt.ble_payload.hop)
self.synchronized = True
timestamp = time.time()
ts_sec = int(timestamp)
ts_usec = int((timestamp - ts_sec)*1000000)
return BTLE_PPI(
btle_channel=self.channel,
btle_clkn_high=ts_sec,
btle_clk_100ns=ts_usec,
rssi_max=0,
rssi_min=0,
rssi_avg=0,
rssi_count=1)/BTLE()/BTLE_ADV(RxAdd=pkt.RxAdd,TxAdd=pkt.TxAdd,RFU=pkt.RFU, PDU_type=pkt.PDU_type)/pkt.ble_payload
elif BTLEJack_Connection_Lost_Notification in pkt or pkt.packet_type==0x4 and pkt.notification_type==0x9:
io.fail("Connection lost !")
self._reset()
self.restartSniffingMode()
self._setAccessAddress(None)
self._setCrcInit(None)
self._setChannelMap(None)
self._setHopInterval(None)
self._setHopIncrement(None)
self.hijacked = False
self.synchronized = False
else:
self.commandResponses.put(pkt)
else:
utils.wait(seconds=0.0001)
def setScanInterval(self,seconds=1):
'''
This method sets the scan interval (in seconds).
:param seconds: number of seconds to wait between two channels
:type seconds: float
:Example:
>>> device.setScanInterval(seconds=1)
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.scanInterval = seconds
def _scanThread(self):
self.sniffAdvertisements(channel=37)
utils.wait(seconds=self.scanInterval)
self.sniffAdvertisements(channel=38)
utils.wait(seconds=self.scanInterval)
self.sniffAdvertisements(channel=39)
utils.wait(seconds=self.scanInterval)
def setScan(self,enable=True):
'''
This method enables or disables the scanning mode. It changes the channel periodically according to the scan interval parameter.
:param enable: boolean indicating if the scanning mode must be enabled
:type enable: bool
:Example:
>>> device.setScan(enable=True) # scanning mode enabled
>>> device.setScan(enable=False) # scanning mode disabled
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if enable:
self.sniffAdvertisements()
if self.scanThreadInstance is None:
self.scanThreadInstance = wireless.StoppableThread(target=self._scanThread)
self.scanThreadInstance.start()
else:
self.scanThreadInstance.stop()
self.scanThreadInstance = None
def _sweepingThread(self):
for channel in self.sweepingSequence:
self.setChannel(channel=channel)
if self.sniffingMode is not None:
if self.sniffingMode == BLESniffingMode.ADVERTISEMENT:
self._internalCommand(BTLEJack_Advertisements_Command()/BTLEJack_Advertisements_Enable_Sniff_Command(channel=channel),noResponse=True)
elif self.sniffingMode == BLESniffingMode.NEW_CONNECTION and not self.synchronized:
self._sniffConnectionRequests(address=self.lastTarget,channel=channel)
utils.wait(seconds=0.1)
def _startSweepingThread(self):
self._stopSweepingThread()
self.sweepingThreadInstance = wireless.StoppableThread(target=self._sweepingThread)
self.sweepingThreadInstance.start()
def _stopSweepingThread(self):
if self.sweepingThreadInstance is not None:
self.sweepingThreadInstance.stop()
self.sweepingThreadInstance = None
def setSweepingMode(self,enable=True,sequence=[37,38,39]):
'''
This method enables or disables the sweeping mode, which monitors the provided subset of advertising channels sequentially.
:param enable: boolean indicating if the Sweeping mode is enabled.
:type enable: bool
:param sequence: sequence of channels to use
:type sequence: list of int
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.sweepingMode = enable
if enable:
self.sweepingSequence = sequence
self._startSweepingThread()
else:
self._stopSweepingThread()
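# Illustrative usage:
#   device.setSweepingMode(enable=True, sequence=[37, 39])   # sweep only advertising channels 37 and 39
#   device.setSweepingMode(enable=False)                      # stop sweeping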
def init(self):
if self.microbit is not None:
self._flush()
self.setCRCChecking(True)
self.scanThreadInstance = None
self.isListening = False
self.hijacking = False
self.jamming = False
self.customMirageFirmware = False
self.receptionBuffer = b""
self.lock = Lock()
self.commandResponses = Queue()
self.channel = 37
self.accessAddress = None
self.crcInit = None
self.channelMap = None
self.hopInterval = None
self.hopIncrement = None
self.sniffingMode = None
self.hijacked = False
self.synchronized = False
self.jammingEnabled = True
self.sweepingMode = False
self.sweepingSequence = []
self.sweepingThreadInstance = None
self.lastTarget = "FF:FF:FF:FF:FF:FF"
self.setScanInterval()
self.candidateAccessAddresses = {}
self.capabilities = ["SNIFFING_EXISTING_CONNECTION", "SNIFFING_NEW_CONNECTION", "HIJACKING_CONNECTIONS", "JAMMING_CONNECTIONS", "COMMUNICATING_AS_MASTER"]
try:
(major,minor) = self._getFirmwareVersion()
io.success("BTLEJack device "+("#"+str(self.index) if isinstance(self.index,int) else str(self.index))+
" successfully instantiated (firmware version : "+str(major)+"."+str(minor)+")")
if major == 3 and minor == 14:
io.info("Custom Mirage Firmware used ! Advertisements sniffing and jamming will be supported.")
self.capabilities += ["SNIFFING_ADVERTISEMENTS","SCANNING","JAMMING_ADVERTISEMENTS"]
self.customMirageFirmware = True
self._reset()
self.ready = True
except:
self.microbit = None
self.ready = False
|
measure_methods.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-function-args,too-many-nested-blocks
"""
Functions that run on executor for measurement.
These functions are responsible for building the tvm module, uploading it to
remote devices, recording the running time costs, and checking the correctness of the output.
"""
import logging
import shutil
import os
import threading
import time
from random import getrandbits
from collections import namedtuple
import tempfile
import numpy as np
import tvm._ffi
from tvm import nd, rpc as _rpc, target as _target
from tvm.error import TVMError
from tvm.target import build_config
from tvm.driver import build
from tvm.contrib import nvcc, ndk, tar
from ..util import get_const_tuple
from ..env import AutotvmGlobalScope
from ..task.space import InstantiationError
from .measure import MeasureResult, MeasureErrorNo, Builder, Runner
from .local_executor import LocalExecutor
logger = logging.getLogger('autotvm')
class BuildResult(namedtuple("BuildResult", ('filename', 'arg_info', 'error', 'time_cost'))):
"""
Stores all the necessary inputs for a measurement.
Parameters
----------
filename : str
The filename of generated library
arg_info : Tuple
The shape and dtype information of tvm tensor arguments
error : Exception
The error happens during compilation.
time_cost : float
The time cost of building
"""
class LocalBuilder(Builder):
"""Run compilation on local machine
Parameters
----------
timeout: float
The timeout of a compilation
n_parallel: int
The number of tasks run in parallel. "None" will use all cpu cores
build_func: callable or str
If is 'default', use default build function
If is 'ndk', use function for android ndk
If is callable, use it as custom build function, expect lib_format field.
"""
def __init__(self, timeout=10, n_parallel=None, build_func='default'):
super(LocalBuilder, self).__init__(timeout, n_parallel)
if isinstance(build_func, str):
if build_func == 'default':
build_func = tar.tar
elif build_func == 'ndk':
build_func = ndk.create_shared
else:
raise ValueError("Invalid build_func: " + build_func)
self.build_func = _wrap_build_func(build_func)
self.executor = LocalExecutor(timeout=timeout)
self.tmp_dir = tempfile.mkdtemp()
def build(self, measure_inputs):
results = []
shutil.rmtree(self.tmp_dir, ignore_errors=True)
self.tmp_dir = tempfile.mkdtemp()
for i in range(0, len(measure_inputs), self.n_parallel):
futures = []
for inp in measure_inputs[i:i + self.n_parallel]:
ret = self.executor.submit(self.build_func,
inp,
self.tmp_dir,
**self.build_kwargs)
futures.append(ret)
for future in futures:
res = future.get()
if isinstance(res, Exception):
# timeout or fleet error, return MeasureResult directly
results.append(MeasureResult((res,), MeasureErrorNo.BUILD_TIMEOUT,
self.timeout, time.time()))
elif res.error is not None:
# instantiation error
if isinstance(res.error, InstantiationError):
results.append(MeasureResult((res.error,),
MeasureErrorNo.INSTANTIATION_ERROR,
res.time_cost, time.time()))
else:
if "InstantiationError" in str(res.error):
msg = str(res.error)
try:
msg = msg.split('\n')[-2].split(": ")[1]
except Exception: # pylint: disable=broad-except
pass
results.append(MeasureResult((InstantiationError(msg),),
MeasureErrorNo.INSTANTIATION_ERROR,
res.time_cost, time.time()))
else: # tvm error
results.append(MeasureResult((res.error,),
MeasureErrorNo.COMPILE_HOST,
res.time_cost, time.time()))
else:
# return BuildResult
results.append(res)
return results
class RPCRunner(Runner):
"""Run generated code on remote devices.
This function will ask a RPC Tracker to get device for measurement.
Parameters
----------
timeout: float
The timeout of a compilation
n_parallel: int
The number of tasks run in parallel. "None" will use all cpu cores
key: str
The key of the device registered in the tracker
host: str
The host address of RPC Tracker
port: int
The port of RPC Tracker
number: int
The number of times to run the generated code for taking average.
We call these runs one `repeat` of measurement.
repeat : int, optional
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first "1" is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameter `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval: float, optional
The cool down interval between two measurements.
check_correctness: bool, optional
Whether check correctness after measurement. This will use llvm cpu target to
call your template and get the reference output.
This can work for TOPI templates, but may not work for your custom template.
"""
def __init__(self,
key, host, port, priority=1,
timeout=10, n_parallel=None,
number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,
check_correctness=False):
super(RPCRunner, self).__init__(timeout, n_parallel)
self.key = key
self.host = host
self.port = port
self.priority = priority
self.timeout = timeout
self.number = number
self.repeat = repeat
self.min_repeat_ms = min_repeat_ms
self.ref_input = None
self.ref_output = None
self.check_correctness = check_correctness
self.cooldown_interval = cooldown_interval
self.executor = LocalExecutor()
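# Example: with number=4 and repeat=3 the generated code is run 1 + 4 * 3 = 13 times in total
# (one discarded warm-up run, then 3 repeats of 4 averaged runs), as described in the docstring above.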
def set_task(self, task):
self.task = task
if check_remote(task.target, self.key, self.host, self.port, timeout=self.timeout):
logger.info("Get devices for measurement successfully!")
print(' | Connected Device!')
else:
raise RuntimeError("Cannot get remote devices from the tracker. "
"Please check the status of tracker by "
"'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' "
"and make sure you have free devices on the queue status.")
if self.check_correctness:
# use llvm cpu to generate a reference input/output
# this option works for tuning topi, but might not work for your custom op
with _target.create("llvm"):
s, arg_bufs = task.instantiate(task.config_space.get(0))
self.ref_input = [np.random.uniform(size=get_const_tuple(x.shape)).astype(x.dtype)
for x in arg_bufs]
func = build(s, arg_bufs, "llvm")
tvm_buf = [nd.array(x) for x in self.ref_input]
func(*tvm_buf)
self.ref_output = [x.asnumpy() for x in tvm_buf]
def get_build_kwargs(self):
kwargs = {}
if 'cuda' in self.task.target.keys or 'opencl' in self.task.target.keys or \
'rocm' in self.task.target.keys:
remote = request_remote(self.key, self.host, self.port)
ctx = remote.context(str(self.task.target), 0)
max_dims = ctx.max_thread_dimensions
kwargs['check_gpu'] = {
'max_shared_memory_per_block': ctx.max_shared_memory_per_block,
'max_threads_per_block': ctx.max_threads_per_block,
'max_thread_x': max_dims[0],
'max_thread_y': max_dims[1],
'max_thread_z': max_dims[2],
}
if 'cuda' in self.task.target.keys:
kwargs["cuda_arch"] = "sm_" + "".join(ctx.compute_version.split('.'))
if self.task.target.device_name == 'micro_dev':
kwargs.setdefault('build_option', {})['disable_vectorize'] = True
return kwargs
def run(self, measure_inputs, build_results):
results = []
remote_args = (self.key, self.host, self.port, self.priority, self.timeout)
for i in range(0, len(measure_inputs), self.n_parallel):
futures = []
for measure_inp, build_res in zip(measure_inputs[i:i+self.n_parallel],
build_results[i:i+self.n_parallel]):
ret = self.executor.submit(run_through_rpc,
measure_inp,
build_res,
self.number,
self.repeat,
self.min_repeat_ms,
self.cooldown_interval,
remote_args,
self.ref_input,
self.ref_output)
futures.append(ret)
for future in futures:
res = future.get()
if isinstance(res, Exception): # executor error or timeout
results.append(MeasureResult((str(res),), MeasureErrorNo.RUN_TIMEOUT,
self.timeout, time.time()))
else:
results.append(res)
return results
class LocalRunner(RPCRunner):
"""Run generated code on local devices.
Parameters
----------
timeout: float
The timeout of a compilation
number: int
The number of times to run the generated code for taking average.
We call these runs one `repeat` of measurement.
repeat : int, optional
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first one is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameter `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval: float, optional
The cool down interval between two measurements.
check_correctness: bool, optional
Whether check correctness after measurement. This will use llvm cpu target to
call your template and get the reference output.
This can work for TOPI templates, but may not work for your custom template.
Note
----
This is a "fake" local mode. We start a silent rpc tracker and rpc server
for the user. In this way we reuse timeout/isolation mechanism in RPC infrastructure.
"""
def __init__(self,
timeout=10,
number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,
check_correctness=False):
super(LocalRunner, self).__init__('', None, None, 0,
timeout=timeout, n_parallel=1,
number=number, repeat=repeat,
min_repeat_ms=min_repeat_ms,
cooldown_interval=cooldown_interval,
check_correctness=check_correctness)
self.tracker = None
self.server = None
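# Typical use in a tuning loop (illustrative; assumes the usual autotvm flow):
#   measure_option = autotvm.measure_option(builder=LocalBuilder(), runner=LocalRunner(number=10, repeat=1))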
def set_task(self, task):
# pylint: disable=import-outside-toplevel
from ...rpc.tracker import Tracker
from ...rpc.server import Server
self.task = task
tracker = Tracker('0.0.0.0', port=9000, port_end=10000, silent=True)
device_key = '$local$device$%d' % tracker.port
server = Server('0.0.0.0', port=9000, port_end=10000,
key=device_key,
use_popen=True, silent=True,
tracker_addr=(tracker.host, tracker.port))
self.key = device_key
self.host = tracker.host
self.port = tracker.port
super(LocalRunner, self).set_task(task)
return server, tracker
def _build_func_common(measure_input, check_gpu=None, cuda_arch=None, build_option=None):
"""Common part for building a configuration"""
target, task, config = measure_input
with target:
s, args = task.instantiate(config)
# check invalidity of template and code hash consistency
if not config.valid():
raise InstantiationError(config.errors)
opts = build_option or {}
if check_gpu: # Add verify pass to filter out invalid configs in advance.
opts["add_lower_pass"] = [(2, gpu_verify_pass(**check_gpu))]
if cuda_arch:
set_cuda_target_arch(cuda_arch)
# if target is vta, we need to use vta build
if hasattr(measure_input.target, 'device_name') and \
measure_input.target.device_name == 'vta':
# pylint: disable=import-outside-toplevel
import vta
func = vta.build(s, args, target_host=task.target_host)
else:
with build_config(**opts):
func = build(s, args, target_host=task.target_host)
return func, tuple((get_const_tuple(x.shape), x.dtype) for x in args)
def _wrap_build_func(build_func):
"""
Wrap build_func to a function that can be used in measure.
Parameters
----------
build_func : The compilation function
We expect fcompile to contain an attr "output_format"
Returns
-------
wrapped_build_func : function
The wrapped build function
"""
if not hasattr(build_func, "output_format"):
raise AttributeError("Expect build_func to have the attribute output_format.")
output_format = build_func.output_format
def _wrapped(measure_input, tmp_dir, **kwargs):
"""
Wrapped build func.
Parameters
----------
measure_input: MeasureInput
The input of measurement
tmp_dir: str
The path of temporary directory to export generated library
"""
tic = time.time()
try:
filename = os.path.join(tmp_dir, "tmp_func_%0x.%s" % (
getrandbits(64), output_format))
# TODO(tvm-team) consider inlining _build_func_common
func, arg_info = _build_func_common(measure_input, **kwargs)
func.export_library(filename, build_func)
except Exception as e: # pylint: disable=broad-except
return BuildResult(None, None, e, time.time() - tic)
return BuildResult(filename, arg_info, None, time.time() - tic)
return _wrapped
def run_through_rpc(measure_input, build_result,
number, repeat, min_repeat_ms, cooldown_interval,
remote_args, ref_input=None, ref_output=None):
"""Run a generated library through rpc
Parameters
----------
measure_input: MeasureInput
The raw measure input
build_result: BuildResult
The result returned from Builder. This contains the path to the generated library.
number: int
The number of times to run the generated code for taking average.
We call these runs one `repeat` of measurement.
repeat : int, optional
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first one is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameters `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., when the run time of one `repeat` falls below this time, the `number` parameter
will be increased automatically.
cooldown_interval: float
The cool down interval between two measurements
remote_args: Tuple
The argument for request_remote
ref_input: List of np.ndarray
The reference input used for checking correctness
ref_output: List of np.ndarray
The reference output used for checking correctness
"""
if isinstance(build_result, MeasureResult):
return build_result
tic = time.time()
errno = MeasureErrorNo.NO_ERROR
try:
# upload built module
remote = request_remote(*remote_args)
# Program the FPGA every single time when targeting VTA
if hasattr(measure_input.target, 'device_name') and \
measure_input.target.device_name == 'vta':
# pylint: disable=import-outside-toplevel
from vta import program_fpga, reconfig_runtime
program_fpga(remote, None)
reconfig_runtime(remote)
remote.upload(build_result.filename)
func = remote.load_module(os.path.split(build_result.filename)[1])
ctx = remote.context(str(measure_input.target), 0)
time_f = func.time_evaluator(
func.entry_name, ctx, number=number, repeat=repeat, min_repeat_ms=min_repeat_ms)
# set input
if ref_input:
args = [nd.array(x, ctx=ctx) for x in ref_input]
else:
# create empty arrays on the remote device and copy them once.
# This can avoid some memory issues that make the measurement results unreliable.
args = [nd.empty(x[0], dtype=x[1], ctx=ctx) for x in build_result.arg_info]
args = [nd.array(x, ctx=ctx) for x in args]
ctx.sync()
costs = time_f(*args).results
# clean up remote files
remote.remove(build_result.filename)
remote.remove(os.path.splitext(build_result.filename)[0] + '.so')
remote.remove('')
if len(costs) > 2: # remove largest and smallest value to reduce variance
costs = list(costs)
costs.sort()
costs = tuple(costs[1:-1])
# check correctness of output
if ref_output:
for expected, real in zip(ref_output, args):
if not np.allclose(expected, real.asnumpy(), rtol=1e-4):
logger.warning("Wrong Answer!")
errno = MeasureErrorNo.WRONG_ANSWER
except TVMError as exc:
msg = str(exc)
if "Stack trace returned" in msg:
msg = msg[:msg.index("Stack trace returned")]
if "CUDA Source" in msg:
msg = msg[:msg.index("CUDA Source")]
costs = (RuntimeError(msg[:1024]),)
errno = MeasureErrorNo.RUNTIME_DEVICE
tstamp = time.time()
time.sleep(cooldown_interval)
return MeasureResult(costs, errno, tstamp - tic + build_result.time_cost, tstamp)
def request_remote(device_key, host=None, port=None, priority=1, timeout=60):
"""Request a remote session
Parameters
----------
device_key: string
The device key of registered device in tracker
host: str, optional
The host address of the RPC tracker.
If None, the environment variable "TVM_TRACKER_HOST" is used
port: int, optional
The port of the RPC tracker.
If None, the environment variable "TVM_TRACKER_PORT" is used
priority: int, optional
The priority of this request; a larger value means higher priority
timeout: float, optional
The timeout of this session (units: second)
Returns
-------
session: RPCSession
"""
# connect to the tracker
host = host or os.environ['TVM_TRACKER_HOST']
port = port or int(os.environ['TVM_TRACKER_PORT'])
tracker = _rpc.connect_tracker(host, port)
remote = tracker.request(device_key, priority=priority,
session_timeout=timeout)
return remote
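# Illustrative sketch (not part of the original module): with a tracker reachable via
# the TVM_TRACKER_HOST / TVM_TRACKER_PORT environment variables, a session for a
# registered device key (the key '1080ti' below is a hypothetical example) could be
# requested and used roughly like this:
#
#   remote = request_remote('1080ti', priority=1, timeout=60)
#   ctx = remote.context('cuda', 0)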
def check_remote(target, device_key, host=None, port=None, priority=100, timeout=10):
"""
Check the availability of a remote device
Parameters
----------
target: Target
The wanted compilation target
device_key: string
device key of registered device in tracker
host: str, optional
The host address of the RPC tracker.
If None, the environment variable "TVM_TRACKER_HOST" is used
port: int, optional
The port of the RPC tracker.
If None, the environment variable "TVM_TRACKER_PORT" is used
priority: int, optional
The priority of this request; a larger value means higher priority
timeout: float, optional
The timeout of this check (units: seconds).
Returns
-------
available: bool
True if can find available device
"""
def _check():
remote = request_remote(device_key, host, port, priority)
ctx = remote.context(str(target))
while not ctx.exist: # wait until we get an available device
pass
t = threading.Thread(target=_check,)
t.start()
t.join(timeout)
return not t.is_alive()
@tvm._ffi.register_func
def tvm_callback_cuda_compile(code):
"""use nvcc to generate ptx code for better optimization"""
curr_cuda_target_arch = AutotvmGlobalScope.current.cuda_target_arch
# e.g., target arch could be [
# "-gencode", "arch=compute_52,code=sm_52",
# "-gencode", "arch=compute_70,code=sm_70"
# ]
target = "fatbin" if isinstance(curr_cuda_target_arch, list) else "ptx"
ptx = nvcc.compile_cuda(code, target=target, arch=AutotvmGlobalScope.current.cuda_target_arch)
return ptx
def set_cuda_target_arch(arch):
"""set target architecture of nvcc compiler
Parameters
----------
arch: str or list
The argument of nvcc -arch (e.g. "sm_51", "sm_62").
It can also be a list of gencode arguments passed to the nvcc command line,
e.g., ["-gencode", "arch=compute_52,code=sm_52", "-gencode", "arch=compute_70,code=sm_70"]
"""
AutotvmGlobalScope.current.cuda_target_arch = arch
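# Illustrative sketch (not part of the original module): the arch value set here is what
# tvm_callback_cuda_compile reads back.  A single arch string keeps the "ptx" target,
# while a list of gencode flags switches it to "fatbin":
#
#   set_cuda_target_arch("sm_70")                                       # compiled as ptx
#   set_cuda_target_arch(["-gencode", "arch=compute_52,code=sm_52",
#                         "-gencode", "arch=compute_70,code=sm_70"])    # compiled as fatbin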
def gpu_verify_pass(**kwargs):
"""Verify the validity of a gpu kernel.
This pass will check memory usage and number of threads per block.
"""
def verify_pass(f, *_):
valid = tvm.tir.analysis.verify_gpu_code(f, kwargs)
if not valid:
raise InstantiationError("Skipped because of invalid gpu kernel")
return f
return tvm.tir.transform.prim_func_pass(verify_pass, opt_level=0)
|
AsynchExec_v2.py
|
"""
Framework for setting up an experiment.
"""
import numpy as np
import gym
import gym_minigrid, gym_cap
import tensorflow as tf
import argparse
from urllib.parse import unquote
import os
from networks.network import Network
from utils.utils import InitializeVariables, CreatePath, interval_flag, GetFunction
from utils.record import Record,SaveHyperparams
import json
from utils.utils import MovingAverage
import threading
import collections.abc
from environments.Common import CreateEnvironment
#Input arguments to override the default Config Files
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", required=True,
help="File for specific run. Located in ./configs/run")
parser.add_argument("-c", "--config", required=False,
help="JSON configuration string to override runtime configs of the script.")
parser.add_argument("-n", "--network", required=False,
help="JSON configuration string to override network parameters")
parser.add_argument("-p", "--processor", required=False, default="/gpu:0",
help="Processor identifier string. Ex. /cpu:0 /gpu:0")
parser.add_argument("-r", "--render", default=False, action="store_true",
help="Processor identifier string. Ex. /cpu:0 /gpu:0")
args = parser.parse_args()
if args.config is not None: configOverride = json.loads(unquote(args.config))
else: configOverride = {}
if args.network is not None: netConfigOverride = json.loads(unquote(args.network))
else: netConfigOverride = {}
def Update(defaultSettings,overrides):
for label,override in overrides.items():
if isinstance(override, collections.abc.Mapping):
Update(defaultSettings[label],override)
else:
defaultSettings[label] = override
return defaultSettings
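# Illustrative sketch (not part of the original script): Update() merges nested override
# dictionaries into the defaults in place; the keys below are hypothetical examples.
#
#   defaults  = {"LR": 1e-3, "Env": {"Name": "CartPole-v1", "Steps": 200}}
#   overrides = {"Env": {"Steps": 500}}
#   Update(defaults, overrides)
#   # -> {"LR": 1e-3, "Env": {"Name": "CartPole-v1", "Steps": 500}}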
#Defining parameters and Hyperparameters for the run.
for (dirpath, dirnames, filenames) in os.walk("configs/run"):
for filename in filenames:
if args.file in filename:
runConfigFile = os.path.join(dirpath,filename)
break
with open(runConfigFile) as json_file:
settings = json.load(json_file)
settings.update(configOverride)
#Creating the Networks and Methods of the Run.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=settings["GPUCapacitty"], allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False, allow_soft_placement=True)
sess = tf.Session(config=config)
with tf.device(args.processor):
Method = GetFunction(settings["Method"])
workers = Method(sess=sess,settings=settings,netConfigOverride=netConfigOverride)
InitializeVariables(sess) #Included to catch if there are any uninitialized variables.
#Saving config files in the model directory
EXP_NAME = settings["RunName"]
LOG_PATH = './logs/'+EXP_NAME
CreatePath(LOG_PATH)
with open(LOG_PATH+'/runSettings.json', 'w') as outfile:
json.dump(settings, outfile)
with open(LOG_PATH+'/netConfigOverride.json', 'w') as outfile:
json.dump(netConfigOverride, outfile)
COORD = tf.train.Coordinator()
worker_threads = []
for i,worker in enumerate(workers):
if i==0:
job = lambda worker=worker: worker.work(COORD,render=args.render) # bind worker at definition time to avoid the late-binding closure bug
else:
job = lambda worker=worker: worker.work(COORD)
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
|
algo_five.py
|
from functools import reduce
import numpy as np
import random as r
import socket
import struct
import subprocess as sp
import threading
from threading import Thread
import ast
import time
import datetime as dt
import os
import psutil
from netifaces import interfaces, ifaddresses, AF_INET
import paho.mqtt.client as mqtt
import smtplib
import config
import paramiko
import argparse
import pickle
hosts = {} # {hostname: ip}
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
deadlock = [1] # keeps count of how many deadlocks have been resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}] # [[task_list],{wait_time}] => records tasks re-offloaded to this MEC for execution.
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
thread_record = []
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
received_time = []
task_record = {} # keeps record of task reoffloaded
task_id = 0 # id for each task reoffloaded
shared_resource_lock = threading.Lock()
t_track = 1
def ping(host):
cmd = [f'ping -c 1 {host}']
output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\n')
try:
value = float(output[-2].split('=')[-1].split('/')[0])
except ValueError:
value = None
return value
def discovering_group():
global sock1
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock1.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def offloading_group():
global sock2
multicast_group = '224.5.5.55'
server_address = ('', 20000)
# Create the socket
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock2.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def ip_address():
try:
cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
if len(address.strip().split('.')) == 4:
return address.strip()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def _memory():
global memory
memory.append(round(my_algo.memory_percent(), 4))
def m_cpu():
global prev_t
# get cpu
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
_cpu.append(round(delta, 4))
def get_mec_rtts():
for i in mec_rtt:
mec_rtt[i].append(get_rtt(i))
def generate_results():
_memory()
m_cpu()
get_mec_rtts()
def host_ip_set():
global ip_set
ip_set = set()
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
ip_set.add(', '.join(addresses))
def get_time():
_time_ = []
d = str(dt.datetime.utcnow()).split()
_time_ += d[0].split('-')
g = d[1].split('.')
_time_ += g[0].split(':')
try:
_time_.append(g[1])
except IndexError:
print(f'indexError on Time: {g}')
_time_.append('0')
return _time_
def get_rtt(host):
rtt = ping(host)
if rtt:
return round(rtt, 4)
else:
return get_rtt(host)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
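# Illustrative sketch (not part of the original module): lcm() is used by edf() below to
# compute the hyperperiod of the task set, e.g. for the periods declared in _tasks:
#
#   lcm([20, 5, 10, 10, 15])   # -> 60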
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(node_id)
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
global run
data = str(msg.payload, 'utf-8')
if data[0] == 'c': # receive from cloud
received_task = data[2:]
# send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
if received_task in task_record:
del task_record[received_task]
received_task = '.'.join(received_task.split('.')[:-1])
_client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )
cooperate['cloud'] += 1
count_task_sent(received_task)
elif data[0] == 't': # receive from client
received_task = ast.literal_eval(data[2:])
received_task_queue.append(received_task)
received_time.append(time.time())
elif data.strip() == 'stop': # stop {hostname: ip}
print('sending stop alert')
run = 0
def connect_to_broker(stop):
global _client
username = 'mec'
password = 'password'
broker_port_no = 1883
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_start()
while True:
if stop():
_client.loop_stop()
_client.disconnect()
print('broker loop terminated')
break
def task_time_map(seq, process):
exe_seq = []
capacity_sum = 0
for job in process:
capacity_sum += process[job]['wcet']
while capacity_sum > 0:
for job in seq:
if process[job]['wcet'] > 0:
exe_seq.append(job)
process[job]['wcet'] -= 1
capacity_sum -= 1
return exe_seq
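# Illustrative sketch (not part of the original module): task_time_map expands a schedule
# into one entry per unit of remaining wcet, walking the sequence round-robin, e.g.
#
#   task_time_map(['t2', 't1'], {'t1': {'wcet': 2}, 't2': {'wcet': 1}})
#   # -> ['t2', 't1', 't1']   (note: the wcet values in the passed dict are consumed)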
total_received_task = 0
def edf():
global total_received_task
t_lcm = lcm([tasks[i]['period'] for i in tasks])
t_dead = {i: tasks[i]['deadline'] for i in tasks}
sorted_dead = sorted(t_dead.items(), key=lambda kv: (kv[1], kv[0]))
# print(sorted_dead)
ready_task = []
for i in sorted_dead:
period = tasks[i[0]]['period']
# print('lcm: ', t_lcm, ' period: ', period)
t_range = int(t_lcm / period)
last_dead = 0
for j in range(t_range):
ready_task.append((i[0], last_dead + tasks[i[0]]['deadline']))
last_dead += period
ready_task = sorted(ready_task, key=lambda t: t[1])
print(ready_task)
t_time_ = 0
schedule = []
missed = []
register = {i: 0 for i in tasks.keys()} # {ti : amount executed}
for i in ready_task:
if (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:
while (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:
t_time_ += 1
# schedule.append(('idle', t_time))
if (t_time_ // tasks[i[0]]['period']) + 1 > register[i[0]]:
if t_time_ + tasks[i[0]]['wcet'] <= i[1]:
register[i[0]] += 1
t_time_ += tasks[i[0]]['wcet']
schedule.append(i[0])
else:
print('Deadline missed: ', i)
missed.append(i[0])
# print('s : ', schedule)
# print('r: ', register)
if len(missed) > 0:
# print('missed deadline: ', missed)
cooperative_mec(missed)
_edf_ = task_time_map(schedule, tasks)
total_received_task += len(_edf_)
return _edf_
# generate execution sequence using wound wait algorithm
def wound_wait(processes, avail, n_need, allocat):
global deadlock
offload = []
# To store execution sequence
exec_seq = []
# Make a copy of available resources
work = [0] * len(processes)
# While all processes are not finished
# or system is not in safe state.
while 0 in work:
ind = work.index(0)
i = processes[ind]
# print('comparing| process: ', i, n_need[i], 'work: ', avail)
if not (False in list(np.greater_equal(avail, n_need[i]))):
exec_seq.append(i)
avail = np.add(avail, allocat[i])
work[ind] = 1
else:
a = list(set(processes) - set(exec_seq) - set(offload))
n = {}
for j in a:
n[j] = sum(allocat[j])
_max = max(n, key=n.get)
# print('work: ', work, 'need: ', _need[_max])
if not (False in list(np.greater_equal(np.array(avail) + np.array(allocat[_max]), n_need[i]))):
offload.append(_max)
avail = np.array(avail) + np.array(allocat[_max])
work[processes.index(_max)] = 1
else:
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
if len(offload) > 0:
print('offloading tasks: ', offload)
cooperative_mec(offload)
deadlock[0] += 1
print('Execution seq: ', exec_seq)
return exec_seq
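# Illustrative sketch (not part of the original module): wound_wait grants each process
# whose need fits in the available vector and otherwise offloads a victim, so with the
# module-level _need/allocation tables a call such as
#
#   wound_wait(['t2_0', 't4_1'], [6, 5, 5],
#              {'t2_0': _need['t2'], 't4_1': _need['t4']},
#              {'t2_0': allocation['t2'], 't4_1': allocation['t4']})
#
# returns ['t2_0', 't4_1'] with nothing offloaded, since both needs fit within [6, 5, 5].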
def get_exec_seq(pro):
# Number of processes
# p = len(pro)
processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]
# Available instances of resources
avail = [6, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# print('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return wound_wait(processes, avail, n_need, allot)
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = i.split('_')[0]
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
# waiting time = total waiting time / 2; using the full average waiting time might be too tight
w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)
send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs
return time_dic
def compare_local_mec(list_seq):
time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq}
print('local vs MEC comparison: ', time_compare_dict)
execute_mec = []
execute_locally = []
for i in time_compare_dict:
if time_compare_dict[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return round(avg1, 4)
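# Illustrative sketch (not part of the original module): for a host with no recorded
# history the cumulative-average formula above reduces to the new sample, e.g.
#
#   calculate_mov_avg('10.0.0.5', 0.8)   # no history -> 0.8
#
# while with one earlier average `prev` it returns (prev + a1) / 2.  (The IP above is a
# hypothetical example.)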
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + str([get_hostname(), ip_address()])
sock1.sendto(str.encode(smg), _multicast_group)
print('\nHello message sent')
else:
sock1.sendto(str.encode(mg), _multicast_group)
except Exception as e:
print(e)
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message(stop): # used for multi-cast message exchange among MEC
global hosts
while True:
if stop():
print('Stopped: receive_message()')
break
else:
data, address = sock1.recvfrom(1024)
_d = data.decode()
if _d[:5] == 'hello':
_data = ast.literal_eval(_d[6:])
hosts[_data[0]] = _data[1]
if _data[1] != host_ip:
mec_rtt[_data[1]] = []
elif (_d[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(_d[7:])
# print('received: ', hosts)
for i in hosts:
if i != host_ip:
mec_rtt[i] = []
elif _d[:2] == 'wt':
split_data = _d.split()
if split_data[1] != host_ip:
w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
address[0])) # calculate moving average of mec wait time => w_time = wait time + rtt
if split_data[1] in mec_waiting_time:
mec_waiting_time[split_data[1]].append(w_time)
else:
mec_waiting_time[split_data[1]] = [w_time]
def mec_comparison():
# returns the MEC (by IP) with the minimum last-recorded waiting time, or 0 if none is known
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
def cooperative_mec(mec_list):
global _off_cloud
global _off_mec
global task_id, task_record
for i in mec_list:
_host = mec_comparison()
if _host == 0:
# send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]
_send_task = f"{i.split('_')[0]}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = i.split('_')[0]
_max = np.array([6, 5, 5])
send = False
if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
send = True
# CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
if mec_waiting_time[_host][-1] < t_time[j][1] and send:
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
elif send and (get_rtt(_host) < get_rtt(cloud_ip)):
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
_send_task = f"{j}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]
# cloud_register[j.split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
outward_mec = 0
offload_check = [0, 0]
def execute_re_offloaded_task(offloaded_task):
global outward_mec, offload_check
exec_list = get_exec_seq(offloaded_task[0])
outward_mec += len(exec_list)
for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'
j = i.split('_')[0]
time.sleep(offloaded_task[1][j] / 2)
# print('j task: ', j)
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
clients_record = {}
def count_task_sent(task):
global clients_record
c_id = task.split('.')[2]
if c_id in clients_record:
clients_record[c_id] += 1
else:
clients_record[c_id] = 1
def execute(local):
print('\nExecuting :', local)
for i in local:
j = i.split('_')[0]
_t = t_time[j][0] / 2
time.sleep(_t)
print('#{}'.format(local.index(i) + 1), ' Executed: ', i)
_client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
count_task_sent(j)
print('============== EXECUTION DONE ===============')
cooperate = {'mec': 0, 'cloud': 0}
def receive_offloaded_task_mec(stop): # run as a thread
global _inward_mec
global t_track
while True:
if stop():
print('Stopped: receive_offloaded_task_mec()')
break
else:
data, address = sock2.recvfrom(1024)
if len(data.decode()) > 0:
da = data.decode().split(' ')
if (address[0] not in ip_set) and (da[0] == node_id): # send back to client
# send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client
if da[1] in task_record:
del task_record[da[1]]
task_new = '.'.join(da[1].split('.')[:-1])
_client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )
count_task_sent(da[1])
cooperate['mec'] += 1
else:
print('*' * 30 + f'\n{da[1]} Not in Task Record\n' + '*' * 30)
elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):
_received = ast.literal_eval(da[2] + da[3])
shared_resource_lock.acquire()
task = _received[0] + '*{}'.format(t_track)
reoffload_list[0].append(task)
reoffload_list[1][task] = _received[1]
shared_resource_lock.release()
t_track += 1
_inward_mec += 1
def call_execute_re_offload(stop):
global reoffload_list, outward_mec
global offload_check
while True:
if stop():
print('Stopped: call_execute_re_offload()')
break
else:
if len(reoffload_list[0]) == 1:
t = reoffload_list[0][-1]
time.sleep(reoffload_list[1][t] / 2)
shared_resource_lock.acquire()
reoffload_list[0].remove(t)
del reoffload_list[1][t]
shared_resource_lock.release()
send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
outward_mec += 1
offload_check[0] += 1
elif len(reoffload_list[0]) > 1:
o = [reoffload_list[0].copy(), reoffload_list[1].copy()] # copy the inner containers so the removals below do not mutate the list being iterated
offload_check[1] += len(o[0])
execute_re_offloaded_task(o)
for i in o[0]:
shared_resource_lock.acquire()
reoffload_list[0].remove(i)
del reoffload_list[1][i]
shared_resource_lock.release()
def send_email(msg, send_path):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results edf+wound-wait {} {}'.format(get_hostname(), send_path)
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def send_offloaded_task_mec(msg):
_multicast_group = ('224.5.5.55', 20000)
try:
sock2.sendto(str.encode(msg), _multicast_group)
except Exception as e:
print(e)
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
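# Illustrative sketch (not part of the original module): mec_id zero-pads the last octet
# of an IP address to three characters, which start_loop uses as the MQTT node_id:
#
#   mec_id('192.168.122.7')    # -> '007'
#   mec_id('192.168.122.119')  # -> '119'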
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, port, un, pw)
for i in data:
cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
except Exception as e:
print(e)
def save_and_send(send_path):
_id_ = get_hostname()[-1]
result = f"\nwt{_id_}_12_{mec_no} = {mec_waiting_time} " \
f"\nrtt{_id_}_12_{mec_no} = {mec_rtt} \ncpu{_id_}_12_{mec_no} = {_cpu} " \
f"\noff_mec{_id_}_12_{mec_no} = {_off_mec} " \
f"\noff_cloud{_id_}_12_{mec_no} = {_off_cloud} " \
f"\ninward_mec{_id_}_12_{mec_no} = {_inward_mec}" \
f"\nloc{_id_}_12_{mec_no} = {_loc} " \
f"\ndeadlock{_id_}_12_{mec_no} = {deadlock} \nmemory{_id_}_12_{mec_no} = {memory}" \
f"\ntask_received = {total_received_task} \nsent_t = {clients_record}" \
f"\ncooperate{_id_}_12_{mec_no} = {cooperate} \ntask_record{_id_}_12_{mec_no} = {task_record}" \
f"\noutward_mec{_id_}_12_{mec_no} = {outward_mec}" \
f"\noffload_check{_id_}_12_{mec_no} = {offload_check}"
list_result = [
f"\nwt{_id_}_12_{mec_no} = {mec_waiting_time} ",
f"\nrtt{_id_}_12_{mec_no} = {mec_rtt} \ncpu{_id_}_12_{mec_no} = {_cpu} ",
f"\noff_mec{_id_}_12_{mec_no} = {_off_mec} \noff_cloud{_id_}_12_{mec_no} = {_off_cloud} ",
f"\ninward_mec{_id_}_12_{mec_no} = {_inward_mec}",
f"\nloc{_id_}_12_{mec_no} = {_loc} ",
f"\ndeadlock{_id_}_12_{mec_no} = {deadlock} \nmemory{_id_}_12_{mec_no} = {memory}",
f"\ntask_received{_id_}_12_{mec_no} = {total_received_task} \nsent_t{_id_}_12_{mec_no} = {clients_record}",
f"\ncooperate{_id_}_12_{mec_no} = {cooperate} \ntask_record{_id_}_12_{mec_no} = {task_record} "
f"\noutward_mec{_id_}_12_{mec_no} = {outward_mec}",
f"\noffload_check{_id_}_12_{mec_no} = {offload_check}",
]
path_ = 'data/raw/'
if os.path.exists(path_):
cmd = f"echo '' > {path_}{_id_}_12_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_12_{mec_no}datap.py"
os.system(cmd)
else:
os.system('mkdir -p data/raw')
cmd = f"echo '' > {path_}{_id_}_12_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_12_{mec_no}datap.py"
os.system(cmd)
file_ = open(f'{path_}{_id_}_12_{mec_no}datap.py', 'w')
for i in list_result:
cmd = f'echo "{i}" >> {path_}{_id_}_12_{mec_no}datal.py'
file_.write(i)
os.system(cmd)
file_.close()
sp.run(
["scp", f"{path_}{_id_}_12_{mec_no}datap.py", f"mec@{hosts['osboxes-0']}:{send_path}"])
send_result(hosts['osboxes-0'], list_result)
send_email(result, send_path)
if len(task_record) > 0:
for _task_ in task_record:
task_new = '.'.join(_task_.split('.')[:-1])
_client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )
def start_loop():
global _loc
global tasks
global t_time
global node_id
print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
node_id = mec_id(ip_address())
# print('node id: ', node_id)
func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
threads_ = []
stop = False
for i in func_to_thread:
threads_.append(Thread(target=i, args=(lambda: stop,)))
threads_[-1].daemon = True
threads_[-1].start()
input('start..')
print('========= Waiting for tasks ==========')
_time_ = dt.datetime.now()
while True:
try:
if len(received_task_queue) > 0:
info = received_task_queue.pop(0)
tasks, t_time = info
print('EDF List of Processes: ', tasks, '\n')
print('\n========= Running Deadlock Algorithm ===========')
list_seq = get_exec_seq(edf())
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
print('\nWaiting Time List: ', wait_list)
compare_result = compare_local_mec(wait_list)
print('\nExecute Locally: ', compare_result[1])
_loc += len(compare_result[1]) # total number of tasks to be executed locally
print('\nExecute in MEC: ', compare_result[0])
print('\nSending to cooperative platform')
if len(compare_result[0]) > 0:
cooperative_mec(compare_result[0])
execute(compare_result[1])
generate_results()
_time_ = dt.datetime.now()
else:
send_message(str('wt {} 0.0'.format(ip_address())))
time.sleep(.5)
now = dt.datetime.now()
delta = now - _time_
if delta > dt.timedelta(minutes=4):
print('terminating programme: 4 mins elapsed')
stop = True # signal helper threads to exit
break
except KeyboardInterrupt:
print('\nProgramme Terminated')
stop = True # signal helper threads to exit
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
break
print('algo stopped!')
class BrokerSend:
def __init__(self, user, pw, ip, sub_topic, data):
self.user = user
self.pw = pw
self.ip = ip
self.port = 1883
self.topic = sub_topic
self.response = None
self.client = mqtt.Client()
self.client.username_pw_set(self.user, self.pw)
self.client.connect(self.ip, self.port, 60)
self.data = data
def publish(self):
self.client.publish(self.topic, self.data)
def __del__(self):
print('BrokerSend Object Deleted!')
def run_me(mec_no_, send_path, broker_ip_): # call this from agent
global discovering
global mec_no
global host_ip
global my_algo
global broker_ip
print('mec ip: ', ip_address())
my_algo = psutil.Process()
discovering_group()
offloading_group()
host_ip_set()
mec_no = mec_no_
broker_ip = broker_ip_
host_ip = ip_address()
print('MEC Details: ', hosts)
discovering = 1
time.sleep(2)
for host in hosts:
if hosts[host] != host_ip:
mec_rtt[hosts[host]] = []
os.system(f'echo {mec_no}/{send_path} >> started.txt')
start_loop()
print('saving data')
save_and_send(send_path)
print('send alert to control')
time.sleep(r.uniform(1, 30))
data = pickle.dumps([get_hostname(), host_ip])
broker_dict = {'user': 'mec', 'pw': 'password', 'sub_topic': 'control', 'ip': '192.168.122.111', 'data': data}
BrokerSend(**broker_dict).publish()
print('Terminating process')
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
def main():
global hosts
global cloud_ip
# (--n, --mec_no_, --cloud_ip, --s_path, --b_ip) send_path = f'/home/mec/result/{kind}/{count}'
mec_nodes = {'mec-9': '192.168.122.119', 'mec-8': '192.168.122.118', 'mec-7': '192.168.122.117',
'mec-6': '192.168.122.116', 'mec-5': '192.168.122.115', 'mec-4': '192.168.122.114',
'mec-3': '192.168.122.113', 'mec-2': '192.168.122.112', 'mec-1': '192.168.122.111',
}
gui = {'osboxes-0': '192.168.122.110'}
cloud_ips = ['192.168.200.11', '192.168.200.12']
b_ip = '192.168.122.111'
parser = argparse.ArgumentParser()
parser.add_argument('--n', type=int, default=1, help='Number of MEC nodes')
parser.add_argument('--p', type=str, default='/home/mec/result/python', help='Path to send result: homo_1')
args = parser.parse_args()
kind, count = args.p.split('_')
send_path = f'/home/mec/result/{kind}/{count}'
ho = sorted(list(mec_nodes))[:args.n - 1]
hosts = {**{host: mec_nodes[host] for host in ho if host != get_hostname()}, **gui}
ho += ['osboxes-0']
cloud_ip = cloud_ips[ho.index(get_hostname()) % 2]
os.system('clear')
run_me(mec_no_=args.n, send_path=send_path, broker_ip_=b_ip)
if __name__ == '__main__':
main()
|
test_script.py
|
"""
Tests for scripts don't yet support verification against redis-server.
"""
from hashlib import sha1
from unittest.case import SkipTest
import sys
import threading
from nose.tools import assert_raises, eq_, ok_
from mockredis import MockRedis
from mockredis.exceptions import RedisError
from mockredis.script import Script as MockRedisScript
from mockredis.tests.test_constants import (
LIST1, LIST2,
SET1,
VAL1, VAL2, VAL3, VAL4,
LPOP_SCRIPT
)
from mockredis.tests.fixtures import raises_response_error
if sys.version_info >= (3, 0):
long = int
class TestScript(object):
"""
Tests for MockRedis scripting operations
"""
def setup(self):
self.redis = MockRedis(load_lua_dependencies=False)
self.LPOP_SCRIPT_SHA = sha1(LPOP_SCRIPT.encode("utf-8")).hexdigest()
try:
lua, lua_globals = MockRedisScript._import_lua(load_dependencies=False)
except RuntimeError:
raise SkipTest("mockredispy was not installed with lua support")
self.lua = lua
self.lua_globals = lua_globals
assert_equal_list = """
function compare_list(list1, list2)
if #list1 ~= #list2 then
return false
end
for i, item1 in ipairs(list1) do
if item1 ~= list2[i] then
return false
end
end
return true
end
function assert_equal_list(list1, list2)
assert(compare_list(list1, list2))
end
return assert_equal_list
"""
self.lua_assert_equal_list = self.lua.execute(assert_equal_list)
assert_equal_list_with_pairs = """
function pair_exists(list1, key, value)
i = 1
for i, item1 in ipairs(list1) do
if i%2 == 1 then
if (list1[i] == key) and (list1[i + 1] == value) then
return true
end
end
end
return false
end
function compare_list_with_pairs(list1, list2)
if #list1 ~= #list2 or #list1 % 2 == 1 then
return false
end
for i = 1, #list1, 2 do
if not pair_exists(list2, list1[i], list1[i + 1]) then
return false
end
end
return true
end
function assert_equal_list_with_pairs(list1, list2)
assert(compare_list_with_pairs(list1, list2))
end
return assert_equal_list_with_pairs
"""
self.lua_assert_equal_list_with_pairs = self.lua.execute(assert_equal_list_with_pairs)
compare_val = """
function compare_val(var1, var2)
return var1 == var2
end
return compare_val
"""
self.lua_compare_val = self.lua.execute(compare_val)
def test_register_script_lpush(self):
# lpush two values
script_content = "redis.call('LPUSH', KEYS[1], ARGV[1], ARGV[2])"
script = self.redis.register_script(script_content)
script(keys=[LIST1], args=[VAL1, VAL2])
# validate insertion
eq_([VAL2, VAL1], self.redis.lrange(LIST1, 0, -1))
def test_register_script_lpop(self):
self.redis.lpush(LIST1, VAL2, VAL1)
# lpop one value
script_content = "return redis.call('LPOP', KEYS[1])"
script = self.redis.register_script(script_content)
list_item = script(keys=[LIST1])
# validate lpop
eq_(VAL1, list_item)
eq_([VAL2], self.redis.lrange(LIST1, 0, -1))
def test_register_script_rpoplpush(self):
self.redis.lpush(LIST1, VAL2, VAL1)
self.redis.lpush(LIST2, VAL4, VAL3)
# rpoplpush
script_content = "redis.call('RPOPLPUSH', KEYS[1], KEYS[2])"
script = self.redis.register_script(script_content)
script(keys=[LIST1, LIST2])
# validate rpoplpush
eq_([VAL1], self.redis.lrange(LIST1, 0, -1))
eq_([VAL2, VAL3, VAL4], self.redis.lrange(LIST2, 0, -1))
def test_register_script_rpop_lpush(self):
self.redis.lpush(LIST1, VAL2, VAL1)
self.redis.lpush(LIST2, VAL4, VAL3)
# rpop from LIST1 and lpush the same value to LIST2
script_content = """
local tmp_item = redis.call('RPOP', KEYS[1])
redis.call('LPUSH', KEYS[2], tmp_item)
"""
script = self.redis.register_script(script_content)
script(keys=[LIST1, LIST2])
# validate rpop and then lpush
eq_([VAL1], self.redis.lrange(LIST1, 0, -1))
eq_([VAL2, VAL3, VAL4], self.redis.lrange(LIST2, 0, -1))
def test_register_script_client(self):
# lpush two values in LIST1 in first instance of redis
self.redis.lpush(LIST1, VAL2, VAL1)
# create script on first instance of redis
script_content = LPOP_SCRIPT
script = self.redis.register_script(script_content)
# lpush two values in LIST1 in redis2 (second instance of redis)
redis2 = MockRedis()
redis2.lpush(LIST1, VAL4, VAL3)
# execute LPOP script on redis2 instance
list_item = script(keys=[LIST1], client=redis2)
# validate lpop from LIST1 in redis2
eq_(VAL3, list_item)
eq_([VAL4], redis2.lrange(LIST1, 0, -1))
eq_([VAL1, VAL2], self.redis.lrange(LIST1, 0, -1))
def test_eval_lpush(self):
# lpush two values
script_content = "redis.call('LPUSH', KEYS[1], ARGV[1], ARGV[2])"
self.redis.eval(script_content, 1, LIST1, VAL1, VAL2)
# validate insertion
eq_([VAL2, VAL1], self.redis.lrange(LIST1, 0, -1))
def test_eval_lpop(self):
self.redis.lpush(LIST1, VAL2, VAL1)
# lpop one value
script_content = "return redis.call('LPOP', KEYS[1])"
list_item = self.redis.eval(script_content, 1, LIST1)
# validate lpop
eq_(VAL1, list_item)
eq_([VAL2], self.redis.lrange(LIST1, 0, -1))
def test_eval_lrem(self):
self.redis.delete(LIST1)
self.redis.lpush(LIST1, VAL1)
# lrem one value
script_content = "return redis.call('LREM', KEYS[1], 0, ARGV[1])"
value = self.redis.eval(script_content, 1, LIST1, VAL1)
eq_(value, 1)
def test_eval_zadd(self):
# The score and member are reversed when the client is not strict.
self.redis.strict = False
script_content = "return redis.call('zadd', KEYS[1], ARGV[1], ARGV[2])"
self.redis.eval(script_content, 1, SET1, 42, VAL1)
eq_(42, self.redis.zscore(SET1, VAL1))
def test_eval_zrangebyscore(self):
# Basic zrangebyscore without a LIMIT clause.
script = "return redis.call('zrangebyscore',KEYS[1],ARGV[1],ARGV[2])"
self.eval_zrangebyscore(script)
def test_eval_zrangebyscore_with_limit(self):
# Make sure the limit is removed.
script = ("return redis.call('zrangebyscore', "
"KEYS[1], ARGV[1], ARGV[2], 'LIMIT', 0, 2)")
self.eval_zrangebyscore(script)
def eval_zrangebyscore(self, script):
self.redis.strict = False
self.redis.zadd(SET1, VAL1, 1)
self.redis.zadd(SET1, VAL2, 2)
eq_([], self.redis.eval(script, 1, SET1, 0, 0))
eq_([VAL1], self.redis.eval(script, 1, SET1, 0, 1))
eq_([VAL1, VAL2], self.redis.eval(script, 1, SET1, 0, 2))
eq_([VAL2], self.redis.eval(script, 1, SET1, 2, 2))
def test_table_type(self):
self.redis.lpush(LIST1, VAL2, VAL1)
script_content = """
local items = redis.call('LRANGE', KEYS[1], ARGV[1], ARGV[2])
return type(items)
"""
script = self.redis.register_script(script_content)
itemType = script(keys=[LIST1], args=[0, -1])
eq_('table', itemType)
def test_script_hgetall(self):
myhash = {"k1": "v1"}
self.redis.hmset("myhash", myhash)
script_content = """
return redis.call('HGETALL', KEYS[1])
"""
script = self.redis.register_script(script_content)
item = script(keys=["myhash"])
ok_(isinstance(item, list))
eq_(["k1", "v1"], item)
def test_evalsha(self):
self.redis.lpush(LIST1, VAL1)
script = LPOP_SCRIPT
sha = self.LPOP_SCRIPT_SHA
# validate the error raised when the script is not registered
with assert_raises(RedisError) as redis_error:
self.redis.evalsha(self.LPOP_SCRIPT_SHA, 1, LIST1)
eq_("Sha not registered", str(redis_error.exception))
with assert_raises(RedisError):
self.redis.evalsha(self.LPOP_SCRIPT_SHA, 1, LIST1)
# load script and then evalsha
eq_(sha, self.redis.script_load(script))
eq_(VAL1, self.redis.evalsha(sha, 1, LIST1))
eq_(0, self.redis.llen(LIST1))
def test_script_exists(self):
script = LPOP_SCRIPT
sha = self.LPOP_SCRIPT_SHA
eq_([False], self.redis.script_exists(sha))
self.redis.register_script(script)
eq_([True], self.redis.script_exists(sha))
def test_script_flush(self):
script = LPOP_SCRIPT
sha = self.LPOP_SCRIPT_SHA
self.redis.register_script(script)
eq_([True], self.redis.script_exists(sha))
self.redis.script_flush()
eq_([False], self.redis.script_exists(sha))
def test_script_load(self):
script = LPOP_SCRIPT
sha = self.LPOP_SCRIPT_SHA
eq_([False], self.redis.script_exists(sha))
eq_(sha, self.redis.script_load(script))
eq_([True], self.redis.script_exists(sha))
def test_lua_to_python_none(self):
lval = self.lua.eval("")
pval = MockRedisScript._lua_to_python(lval)
ok_(pval is None)
def test_lua_to_python_list(self):
lval = self.lua.eval('{"val1", "val2"}')
pval = MockRedisScript._lua_to_python(lval)
ok_(isinstance(pval, list))
eq_(["val1", "val2"], pval)
def test_lua_to_python_long(self):
lval = self.lua.eval('22')
pval = MockRedisScript._lua_to_python(lval)
ok_(isinstance(pval, long))
eq_(22, pval)
def test_lua_to_python_float(self):
lval = self.lua.eval('22.2')
pval = MockRedisScript._lua_to_python(lval)
ok_(isinstance(pval, float))
eq_(22.2, pval)
def test_lua_to_python_string(self):
lval = self.lua.eval('"somestring"')
pval = MockRedisScript._lua_to_python(lval)
ok_(isinstance(pval, str))
eq_("somestring", pval)
def test_lua_to_python_bool(self):
lval = self.lua.eval('true')
pval = MockRedisScript._lua_to_python(lval)
ok_(isinstance(pval, bool))
eq_(True, pval)
def test_python_to_lua_none(self):
pval = None
lval = MockRedisScript._python_to_lua(pval)
is_null = """
function is_null(var1)
return var1 == nil
end
return is_null
"""
lua_is_null = self.lua.execute(is_null)
ok_(MockRedisScript._lua_to_python(lua_is_null(lval)))
def test_python_to_lua_string(self):
pval = "somestring"
lval = MockRedisScript._python_to_lua(pval)
lval_expected = self.lua.eval('"somestring"')
eq_("string", self.lua_globals.type(lval))
eq_(lval_expected, lval)
def test_python_to_lua_list(self):
pval = ["abc", "xyz"]
lval = MockRedisScript._python_to_lua(pval)
lval_expected = self.lua.eval('{"abc", "xyz"}')
self.lua_assert_equal_list(lval_expected, lval)
def test_python_to_lua_dict(self):
pval = {"k1": "v1", "k2": "v2"}
lval = MockRedisScript._python_to_lua(pval)
lval_expected = self.lua.eval('{"k1", "v1", "k2", "v2"}')
self.lua_assert_equal_list_with_pairs(lval_expected, lval)
def test_python_to_lua_long(self):
pval = long(10)
lval = MockRedisScript._python_to_lua(pval)
lval_expected = self.lua.eval('10')
eq_("number", self.lua_globals.type(lval))
ok_(MockRedisScript._lua_to_python(self.lua_compare_val(lval_expected, lval)))
def test_python_to_lua_float(self):
pval = 10.1
lval = MockRedisScript._python_to_lua(pval)
lval_expected = self.lua.eval('10.1')
eq_("number", self.lua_globals.type(lval))
ok_(MockRedisScript._lua_to_python(self.lua_compare_val(lval_expected, lval)))
def test_python_to_lua_boolean(self):
pval = True
lval = MockRedisScript._python_to_lua(pval)
eq_("boolean", self.lua_globals.type(lval))
ok_(MockRedisScript._lua_to_python(lval))
def test_lua_ok_return(self):
script_content = "return {ok='OK'}"
script = self.redis.register_script(script_content)
eq_('OK', script())
@raises_response_error
def test_lua_err_return(self):
script_content = "return {err='ERROR Some message'}"
script = self.redis.register_script(script_content)
script()
def test_concurrent_lua(self):
script_content = """
local entry = redis.call('HGETALL', ARGV[1])
redis.call('HSET', ARGV[1], 'kk', 'vv')
return entry
"""
script = self.redis.register_script(script_content)
for i in range(500):
self.redis.hmset(i, {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'})
def lua_thread():
for i in range(500):
script(args=[i])
active_threads = []
for i in range(10):
thread = threading.Thread(target=lua_thread)
active_threads.append(thread)
thread.start()
for thread in active_threads:
thread.join()
|
server.py
|
import os
import sys
import time
import random
import socket
import threading
import datetime
from multiprocessing.pool import ThreadPool
sys.path.append('..')
from helpers import message
from helpers.filelock import FileLock
from player import Player
# Mostly just a struct to hold match information
class Match:
def __init__(self, players):
self.names = [player.name for player in players]
self.winner = -1
self.results = ''
class Server:
''' Server class handles all communication between this program and client scripts ''' # <- Docstring
'''
@description Constructor, initializes data members and ensures correct usage.
@param game function(list<string>,list<connection>,string,dict,threading.Event) what game to run
@param settings dict<string, string> what settings to use for the server
'''
def __init__(self, game, settings):
self.players = []
# self.match_lock = threading.Mutex() # Make sure your threads don't lock much
self.matches = dict()
self.threads = []
self.alive = threading.Event() # Set in run_match
self.game = game
#self.scoring = game.scoring # Get scoring system from game
# Load settings from config file
self.ppm = int(settings["ppm"])
self.port = int(settings["port"])
self.address = settings["address"][:-1] #strip newline character
self.prune = int(settings["prune"])
self.sleep = int(settings["sleep"])
self.win_by = int(settings["win_by"])
self.timeout = float(settings["timeout"])
self.min_games = int(settings["min_games"])
self.max_games = int(settings["max_games"])
self.listen_queue = int(settings["listen_queue"])
self.verbose = int(settings["verbose"])
self.log_folder = os.path.realpath(settings["log_folder"])
self.err_folder = os.path.realpath(settings["error_folder"])
self.res_folder = os.path.realpath(settings["results_folder"])
if not os.path.exists(self.log_folder):
os.makedirs(self.log_folder)
if not os.path.exists(self.err_folder):
os.makedirs(self.err_folder)
if not os.path.exists(self.res_folder):
os.makedirs(self.res_folder)
self.now = lambda: str(datetime.datetime.now())
# Error log
fname = self.now().replace(':', '-') + ".log"
self.log_file = os.path.join(self.log_folder, fname)
self.err_file = os.path.join(self.err_folder, fname)
self.res_file = os.path.join(self.res_folder, fname)
with open(self.log_file, 'w') as f:
f.write("Activity log for RAIL GGS:\n---------------------\n")
with open(self.err_file, 'w') as f:
f.write("Error log for RAIL GGS:\n---------------------\n")
with open(self.res_file, 'w') as f:
f.write("Results log for RAIL GGS:\n---------------------\n")
# TODO: Implement locks on files since everything is asynchronous
'''
@description Prints a message to a log, in this case just the terminal.
@param msg string what message to print
@return string the passed message
'''
def report(self, msg):
# This function is too complicated to properly comment
sys.stdout.flush()
now = str(datetime.datetime.now())
# Grab the lock
with FileLock(self.log_file), open(self.log_file, 'a') as f:
f.write(self.now() + ':\t')
f.write(msg + '\n')
return msg
'''
@description Prints a message to a log.
@param msg string what message to print
@return string the passed message
'''
def log(self, msg, log_level=0):
if log_level <= self.verbose:
print(msg)
return self.report(msg)
else:
return msg # This is pythonic
'''
@description Prints a error to a log.
@param e Exception the exception being logged
@return Exception the exception passed as an arg
'''
def log_error(self, e):
# Grab the lock
with FileLock(self.err_file), open(self.err_file, 'a') as f:
f.write(self.now() + ':\t')
f.write(str(e) + ('\n'))
#self.report("An exception has been raised: %s" % (e,))
return e
'''
@description Prints results of a match to a log.
@param results string what the results were
@return string the passed results
'''
def log_result(self, results):
# Grab the lock
with FileLock(self.res_file), open(self.res_file, 'a') as f:
f.write(self.now() + ':\t')
f.write(results + '\n')
return self.log(results, 2)
#return results
'''
@description Attempts to send a message to one of the players. Logs failures.
@param receiver Player who is recieving the payload
@param type string what kind of information is being sent
@param payload string data to be sent
@return int number of bytes sent or -1 on failure
'''
def send(self, receiver, msg_type, msg_body):
'''
Types:
ID - player's token
OM - opponent's move
GS - new gamestate after player moves
RS - result of a match (not a game)
TN - end of connection
RQ - request for a move
RN - request for a name
AK - check if you're still alive
'''
# Check if the server is still active
if not self.alive.isSet():
return -1
# Construct and send message
try:
size = receiver.con.send(msg_type, msg_body)
#Log
self.log("Sent %i bytes of data to %s" % (size, receiver.name), 10)
return size
except Exception as e:
# Log
size = len(msg_type) + len(msg_body) + 2
self.log_error(e)
self.log("Failed to send %i bytes of data to %s" % (size, receiver.name), 9)
return -1 # An error occurred, return -1
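# Illustrative sketch (not part of the original class): a single move request could be
# issued as
#
#   self.send(player, message._MSGTYP["Move"], "What is your move?")
#
# which hands the two-character type code and the body to the player's connection
# object and returns the byte count reported by it, or -1 if the server is shutting
# down or the connection has dropped.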
'''
@description Multithreaded poll for multiple recipients
@param recipient_infos list<tuple(Player, string, string)> information about who is receiving the poll and what kind of poll it is
@return dict of players to poll results
'''
def poll_all(self, recipient_infos):
# Recipient_info entries are of form: (player, type, body)
results = dict()
threads = dict()
# For each recipient, make an asynchronous process to handle their response
num_reqs = len(recipient_infos)
pool = ThreadPool(processes=num_reqs)
for info in recipient_infos:
# Unpack poll() args
receiver = info[0]
rq_type = info[1]
body = info[2]
# Run each poll on a separate thread
threads[receiver] = pool.apply_async(self.poll, (receiver, rq_type, body,))
# Get the results, store them in a dict
# Seems like it defeats the purpose of polling asynchronously, but it doesn't (brain teaser?)
for info in recipient_infos:
receiver = info[0]
try:
results[receiver] = threads[receiver].get(timeout=self.timeout)
except Exception as e:
self.log_error(e)
results[receiver] = None # Worry about this later
# Clean up those threads
pool.close()
pool.join()
# Return the dict
return results
'''
@description Sends a player a request for input, then waits for a response. Validates received data with a token.
@param sender Player which player is being polled
@param rq_type string what kind of request it is
@param body string message body to send with the request
@return tuple<Player, tuple<string, string>> tuple of sender and either the response received or None on failure
'''
def poll(self, sender, rq_type, body):
# TODO: timeout if no response received after 1 second
self.log("Sending request %s to %s: %s" % (rq_type, sender.name, body), 10)
err = self.send(sender, rq_type, body)
# If the request didn't get sent, the connection is lost
if err == -1:
out = (sender, None)
else:
# Wait for response
try:
response = sender.con.recv()
if response[1] is not None:
response_msg = response[1][0]
else:
response_msg = "None"
self.log("Received response %s from %s" % (response_msg, sender.name), 10)
out = (sender, response)
except Exception as e:
out = (sender, None)
self.log_error(e)
# Return the response
return out
'''
@description Creates a player object from a connection, then sends that player's token over the connection.
@param name string player's nickname
@param address string player's ip address
@param connection player's connection to the socket
@modifies self.players
@effects appends a new player to self.players
@return Player player object created
'''
def init_player(self, address, connection):
# Create unique (probably) token for player
token = message.gen_token(64)
# Create player object, append to player deque
player = Player(token, address, connection)
self.players.append(player)
# TODO: get token from player's message object
# Return player object
return player
'''
@description Creates a socket and starts to listen for connections. Maximum 1 queued connection.
@modifies self.sock
@effects sets self.sock to be localhost:argv[1]
'''
def init(self):
# Create socket
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (self.address, self.port)
self.sock.bind(server_address)
# Start looking for connections
self.sock.listen(self.listen_queue)
except Exception as e:
self.log_error(e)
self.alive.clear()
'''
@description Take a pool of players and create a groups
@param players list<Player> list of connected players not in a game
@param ppm int number of players per match
@return list<Player> group of players, size ppm
'''
def random_pairings(self, players, ppm):
# Get a random selection of ppm players to be in the game
playing_players = random.sample(players, ppm)
# Return that random selection
return playing_players
'''
@description Goes through list of players and removes everyone who is disconnected
@modifies self.players
@effects removes all disconnected players from self.players
'''
def prune_players(self):
return self.players # TODO: fix prune_players
# New list for connected players
new_playerlist = []
# Ping players with ACK to make sure the connection is still valid
tuples = [(player, message._MSGTYP["Ack"], "ACK") for player in self.players]
reses = self.poll_all(tuples)
# Get a list of all players who are in a game
new_playerlist = [res for res in reses if not reses[res] == None] # Could be if reses[res], but that wouldn't be Pythonic
self.players = new_playerlist
'''
@description Makes random groups of players, ensuring that no player is in more than one game at a time.
@param ppm int players per match
@param timeout double seconds to wait before autoloss of game (not match)
@modifies self.matches
@effects creates a new entry in matches, key is random
@modifies self.threads
@effects runs each match on a new thread
'''
def setup_matches(self, ppm, timeout, pairing_method):
# TODO: implement bracket and round-robin
ct = 0
while self.alive.isSet():
ct += 1
# Check that all players are still connected every PRUNE loops
if not ct % self.prune:
self.prune_players()
# Get a list of all players who are in a game
in_queue = [player for player in self.players if player.is_ready()] # TODO: replace with self.in_queue
#print in_queue
if len(in_queue) >= ppm + 1: # Enough players to start a match, plus one to avoid re-pairing the same players after a match
# Matchmake players
playing_players = pairing_method(in_queue, ppm)
# Generate unique key to identify match
uniqid = message.gen_token(64)
self.matches[uniqid] = []
# Set timeouts
# !IMPORTANT: self.match is in charge of reverting these values
for player in playing_players:
player.con.connection.settimeout(timeout)
player.in_game = True
# Make new thread for the match
match = threading.Thread(target=self.match, args=(playing_players, uniqid))
match.start()
self.threads.append(match)
# Wait one second between making new games
time.sleep(self.sleep)
'''
TODO: fix this documentation
@description
@param active_players list<Player> list of players in the game
@param match_id unique identifier by which to distinguish the match
@return list<tuple(Player, score)> list of tuples of players and their scores after the match
TODO: break up this function
'''
def match(self, active_players, match_id):
# Get the names of all players, used for logging
names = ''
for i, player in enumerate(active_players):
if i+1 == len(active_players): # If this is the last player, slap an "and" in there
names += "and " + player.name
else:
names += player.name + ", "
# Inform all players that they have started a match
tuples = [(player, message._MSGTYP["Note"], "Starting a new match between %s" % (names)) for player in active_players]
self.poll_all(tuples) # We don't care about these responses
self.log("Starting a new match between %s" % (names))
# Initialize all scores to zero
scores = dict()
for player in active_players:
scores[player] = 0
games = 0
while games < self.max_games:
if games >= self.min_games:
# Check if someone has won by win_by yet
# Note: we don't need to keep track of players, but it might be useful later so we do so anyway (we did that on purpose)
first = (None, 0)
second = (None, 0)
for player in scores:
score = scores[player]
if score > first[1]:
second, first = first, (player, score)
elif score > second[1]:
second = (player, score)
if first[1] - second[1] >= self.win_by:
# Yay, we're done!
break
# Request moves from all players
tuples = [(player, message._MSGTYP["Move"], "What is your move?") for player in active_players]
responses = self.poll_all(tuples)
### New Function Here ###
# responses is a dict<Player, tuple<Player, tuple<type of response (MV), move>>>
moves = dict()
for response in responses:
# Track moves
msg = responses[response]
try:
# Get the move
if not msg or not msg[1]: # 't' is reserved as a timeout signal
mv = 't'
response.timeout()
else:
mv = msg[1][1]
response.untimeout()
except:
mv = 't'
continue
moves[response] = mv
# Let other players know about this move
players_to_inform = [player for player in active_players if not player is response]
tuples = [(player, message._MSGTYP["OppMove"], "%s;%s" % (response.name, mv)) for player in players_to_inform]
self.poll_all(tuples)
# Run the actual game
results = self.game.game(moves)
# Game has been played
games += 1
# Parse results
if not results:
return None # We'll handle that later
else:
for player in results:
result = results[player]
scores[player] += result
scores_str = ""
for score in scores:
scores_str += "[%s, %s] " % (score.name, str(scores[score]))
self.log_result("Match ended between %s. Results: %s" % (names, scores_str))
### New function here ###
# Inform the player of the result
tuples = [(player, message._MSGTYP["Note"], "Match ended between %s. Results - %s" % (names, scores_str)) for player in active_players]
responses = self.poll_all(tuples)
scoring = self.game.scoring(scores, self.win_by)
for player in active_players:
# Free the players so they can compete once more
player.score += scoring[player]
player.matches += 1
player.in_game = False
player.con.connection.settimeout(self.timeout)
scores = {}
for player in self.players:
if player.matches > 0: scores[player] = player.score / float(player.matches)
ranking = self.game.ranking(scores)
self.log_result('The current standings:')
for rank, player in enumerate(ranking):
if player.matches > 0: self.log_result('%d\t%s\t%f' % (rank, player.name, player.score / float(player.matches)))
return scores
'''
@description Starts the server proper.
@modifies threads
@effects starts matchmaking on a new thread
'''
def go(self):
self.alive.set() # Begin the match
try:
# Create a new thread for pairing players
match_maker = threading.Thread(target=self.setup_matches, args=(self.ppm, self.timeout, self.random_pairings))
match_maker.start()
self.threads.append(match_maker)
# Log
self.log("Server started on port: " + str(self.port), 0)
# TODO: threading for 2 connections
while self.alive.isSet():
# Poll for connection, add them to player queue
connection, client_address = self.sock.accept()
# Create player object, append to self.players, inform player of his token
player = self.init_player(client_address, connection)
try:
# Get player's name, this also informs the player that they have connected
name = self.poll(player, message._MSGTYP["Name"], "What is your name?")
player.name = name[1][1]
self.log("New player %s connected from %s" % (player.name, player.address), 1)
except Exception as e:
self.log_error(e)
self.log("Could not establish a player's connection.", 1)
self.players.remove(player)
continue
except Exception as e:
self.log_error(e)
self.cleanup()
'''
@description Joins all threads. Closes all connections.
@modifies self.threads
@effects empties list
'''
def cleanup(self):
# Clean up threads
self.log("Attempting to close threads...", 10)
self.alive.clear() # Unset alive, this informs the class that no more server actions should take place
for thread in self.threads:
thread.join()
threads = []
self.log("Threads successfully closed", 10)
# Clean up sockets
self.log("Terminating active connections...", 10)
for player in self.players:
self.send(player, message._MSGTYP["Termination"], "You don't have to go home, but you can't stay here")
player.con.connection.close()
self.log("Active connections terminated", 10)
'''
Why is this server code so complicated? I can make an RPS server in like 100 lines.
Don't worry, we'll fix it in the flavor text.
'''
|
pants_daemon.py
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
import sys
import threading
from contextlib import contextmanager
from dataclasses import dataclass
from typing import IO, Iterator, Optional, cast
from setproctitle import setproctitle as set_process_title
from pants.base.build_environment import get_buildroot
from pants.base.exception_sink import ExceptionSink, SignalHandler
from pants.base.exiter import Exiter
from pants.bin.daemon_pants_runner import DaemonPantsRunner
from pants.engine.native import Native
from pants.engine.unions import UnionMembership
from pants.init.engine_initializer import EngineInitializer
from pants.init.logging import NativeHandler, init_rust_logger, setup_logging
from pants.init.options_initializer import BuildConfigInitializer, OptionsInitializer
from pants.option.option_value_container import OptionValueContainer
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.options_fingerprinter import OptionsFingerprinter
from pants.option.scope import GLOBAL_SCOPE
from pants.pantsd.process_manager import FingerprintedProcessManager
from pants.pantsd.service.fs_event_service import FSEventService
from pants.pantsd.service.pailgun_service import PailgunService
from pants.pantsd.service.pants_service import PantsServices
from pants.pantsd.service.scheduler_service import SchedulerService
from pants.pantsd.service.store_gc_service import StoreGCService
from pants.pantsd.watchman_launcher import WatchmanLauncher
from pants.util.contextutil import stdio_as
from pants.util.logging import LogLevel
from pants.util.memo import memoized_property
from pants.util.strutil import ensure_text
class _LoggerStream(object):
"""A sys.std{out,err} replacement that pipes output to a logger.
N.B. `logging.Logger` expects unicode. However, most of our outstream logic, such as in
`exiter.py`, will use `sys.std{out,err}.buffer` and thus a bytes interface. So, we must provide
a `buffer` property, and change the semantics of the buffer to always convert the message to
unicode. This is an unfortunate code smell, as `logging` does not expose a bytes interface so
this is the best solution we could think of.
"""
def __init__(self, logger, log_level, handler):
"""
:param logging.Logger logger: The logger instance to emit writes to.
:param int log_level: The log level to use for the given logger.
:param Handler handler: The underlying log handler, for determining the fileno
to support faulthandler logging.
"""
self._logger = logger
self._log_level = log_level
self._handler = handler
def write(self, msg):
msg = ensure_text(msg)
for line in msg.rstrip().splitlines():
# The log only accepts text, and will raise a decoding error if the default encoding is ascii
# if provided a bytes input for unicode text.
line = ensure_text(line)
self._logger.log(self._log_level, line.rstrip())
def flush(self):
return
def isatty(self):
return False
def fileno(self):
return self._handler.stream.fileno()
@property
def buffer(self):
return self
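# Illustrative use of _LoggerStream (a sketch only; run_sync() below performs the
# real redirection). The StreamHandler and log path here are assumptions, any
# handler exposing a .stream attribute works:
#
#   handler = logging.StreamHandler(open("/tmp/pantsd.log", "a"))
#   logging.getLogger().addHandler(handler)
#   sys.stdout = _LoggerStream(logging.getLogger(), logging.INFO, handler)
#   print("this ends up in the log rather than on the launching tty")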
class PantsDaemonSignalHandler(SignalHandler):
def __init__(self, daemon):
super().__init__()
self._daemon = daemon
def handle_sigint(self, signum, _frame):
self._daemon.terminate(include_watchman=False)
class PantsDaemon(FingerprintedProcessManager):
"""A daemon that manages PantsService instances."""
JOIN_TIMEOUT_SECONDS = 1
LOG_NAME = "pantsd.log"
class StartupFailure(Exception):
"""Represents a failure to start pantsd."""
class RuntimeFailure(Exception):
"""Represents a pantsd failure at runtime, usually from an underlying service failure."""
@dataclass(frozen=True)
class Handle:
"""A handle to a "probably running" pantsd instance.
We attempt to verify that the pantsd instance is still running when we create a Handle, but
after it has been created it is entirely possible that the pantsd instance perishes.
"""
pid: int
port: int
metadata_base_dir: str
class Factory:
@classmethod
def maybe_launch(cls, options_bootstrapper) -> "PantsDaemon.Handle":
"""Creates and launches a daemon instance if one does not already exist.
:param OptionsBootstrapper options_bootstrapper: The bootstrap options.
:returns: A Handle for the running pantsd instance.
"""
stub_pantsd = cls.create(options_bootstrapper, full_init=False)
with stub_pantsd._services.lifecycle_lock:
if stub_pantsd.needs_restart(stub_pantsd.options_fingerprint):
# Once we determine we actually need to launch, recreate with full initialization.
pantsd = cls.create(options_bootstrapper)
return pantsd.launch()
else:
# We're already launched.
return PantsDaemon.Handle(
stub_pantsd.await_pid(10),
stub_pantsd.read_named_socket("pailgun", int),
stub_pantsd._metadata_base_dir,
)
@classmethod
def restart(cls, options_bootstrapper):
"""Restarts a running daemon instance.
:param OptionsBootstrapper options_bootstrapper: The bootstrap options.
:returns: A Handle for the pantsd instance.
:rtype: PantsDaemon.Handle
"""
pantsd = cls.create(options_bootstrapper)
with pantsd._services.lifecycle_lock:
# N.B. This will call `pantsd.terminate()` before starting.
return pantsd.launch()
@classmethod
def create(cls, options_bootstrapper, full_init=True) -> "PantsDaemon":
"""
:param OptionsBootstrapper options_bootstrapper: The bootstrap options.
:param bool full_init: Whether or not to fully initialize an engine et al for the purposes
of spawning a new daemon. `full_init=False` is intended primarily
for lightweight lifecycle checks (since there is a ~1s overhead to
initialize the engine). See the impl of `maybe_launch` for an example
of the intended usage.
"""
bootstrap_options = options_bootstrapper.bootstrap_options
bootstrap_options_values = bootstrap_options.for_global_scope()
# TODO: https://github.com/pantsbuild/pants/issues/3479
watchman = WatchmanLauncher.create(bootstrap_options_values).watchman
native: Optional[Native] = None
build_root: Optional[str] = None
if full_init:
build_root = get_buildroot()
native = Native()
build_config = BuildConfigInitializer.get(options_bootstrapper)
legacy_graph_scheduler = EngineInitializer.setup_legacy_graph(
native, options_bootstrapper, build_config
)
services = cls._setup_services(
build_root,
bootstrap_options_values,
legacy_graph_scheduler,
watchman,
union_membership=UnionMembership(build_config.union_rules()),
)
else:
services = PantsServices()
return PantsDaemon(
native=native,
build_root=build_root,
work_dir=bootstrap_options_values.pants_workdir,
log_level=bootstrap_options_values.level,
services=services,
metadata_base_dir=bootstrap_options_values.pants_subprocessdir,
bootstrap_options=bootstrap_options,
)
@staticmethod
def _setup_services(
build_root,
bootstrap_options,
legacy_graph_scheduler,
watchman,
union_membership: UnionMembership,
):
"""Initialize pantsd services.
:returns: A PantsServices instance.
"""
should_shutdown_after_run = bootstrap_options.shutdown_pantsd_after_run
fs_event_service = (
FSEventService(watchman, build_root,) if bootstrap_options.watchman_enable else None
)
pidfile_absolute = PantsDaemon.metadata_file_path(
"pantsd", "pid", bootstrap_options.pants_subprocessdir
)
if pidfile_absolute.startswith(build_root):
pidfile = os.path.relpath(pidfile_absolute, build_root)
else:
pidfile = None
logging.getLogger(__name__).warning(
"Not watching pantsd pidfile because subprocessdir is outside of buildroot. Having "
"subprocessdir be a child of buildroot (as it is by default) may help avoid stray "
"pantsd processes."
)
# TODO: make SchedulerService handle fs_event_service being None
scheduler_service = SchedulerService(
fs_event_service=fs_event_service,
legacy_graph_scheduler=legacy_graph_scheduler,
build_root=build_root,
invalidation_globs=OptionsInitializer.compute_pantsd_invalidation_globs(
build_root, bootstrap_options
),
pantsd_pidfile=pidfile,
union_membership=union_membership,
)
pailgun_service = PailgunService(
(bootstrap_options.pantsd_pailgun_host, bootstrap_options.pantsd_pailgun_port),
DaemonPantsRunner,
scheduler_service,
should_shutdown_after_run,
)
store_gc_service = StoreGCService(legacy_graph_scheduler.scheduler)
return PantsServices(
services=tuple(
service
for service in (
fs_event_service,
scheduler_service,
pailgun_service,
store_gc_service,
)
if service is not None
),
port_map=dict(pailgun=pailgun_service.pailgun_port),
)
def __init__(
self,
native: Optional[Native],
build_root: Optional[str],
work_dir: str,
log_level: LogLevel,
services: PantsServices,
metadata_base_dir: str,
bootstrap_options: Optional[OptionValueContainer] = None,
):
"""
:param Native native: A `Native` instance.
:param string build_root: The pants build root.
:param string work_dir: The pants work directory.
:param string log_level: The log level to use for daemon logging.
:param PantsServices services: A registry of services to use in this run.
:param string metadata_base_dir: The ProcessManager metadata base dir.
:param Options bootstrap_options: The bootstrap options, if available.
"""
super().__init__(name="pantsd", metadata_base_dir=metadata_base_dir)
self._native = native
self._build_root = build_root
self._work_dir = work_dir
self._log_level = log_level
self._services = services
self._bootstrap_options = bootstrap_options
self._log_show_rust_3rdparty = (
bootstrap_options.for_global_scope().log_show_rust_3rdparty
if bootstrap_options
else True
)
self._log_dir = os.path.join(work_dir, self.name)
self._logger = logging.getLogger(__name__)
# N.B. This Event is used as nothing more than a convenient atomic flag - nothing waits on it.
self._kill_switch = threading.Event()
@memoized_property
def watchman_launcher(self):
return WatchmanLauncher.create(self._bootstrap_options.for_global_scope())
@property
def is_killed(self):
return self._kill_switch.is_set()
@property
def options_fingerprint(self):
return OptionsFingerprinter.combined_options_fingerprint_for_scope(
GLOBAL_SCOPE, self._bootstrap_options, fingerprint_key="daemon", invert=True
)
def shutdown(self, service_thread_map):
"""Gracefully terminate all services and kill the main PantsDaemon loop."""
with self._services.lifecycle_lock:
for service, service_thread in service_thread_map.items():
self._logger.info(f"terminating pantsd service: {service}")
service.terminate()
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
self._logger.info("terminating pantsd")
self._kill_switch.set()
@staticmethod
def _close_stdio():
"""Close stdio streams to avoid output in the tty that launched pantsd."""
for fd in (sys.stdin, sys.stdout, sys.stderr):
file_no = fd.fileno()
fd.flush()
fd.close()
os.close(file_no)
@contextmanager
def _pantsd_logging(self) -> Iterator[IO[str]]:
"""A context manager that runs with pantsd logging.
Asserts that stdio (represented by file handles 0, 1, 2) is closed to ensure that we can
safely reuse those fd numbers.
"""
# Ensure that stdio is closed so that we can safely reuse those file descriptors.
for fd in (0, 1, 2):
try:
os.fdopen(fd)
raise AssertionError(f"pantsd logging cannot initialize while stdio is open: {fd}")
except OSError:
pass
# Redirect stdio to /dev/null for the rest of the run, to reserve those file descriptors
# for further forks.
with stdio_as(stdin_fd=-1, stdout_fd=-1, stderr_fd=-1):
# Reinitialize logging for the daemon context.
init_rust_logger(self._log_level, self._log_show_rust_3rdparty)
# We can't statically prove it, but we won't execute `launch()` (which
# calls `run_sync` which calls `_pantsd_logging`) unless PantsDaemon
# is launched with full_init=True. If PantsDaemon is launched with
# full_init=True, we can guarantee self._native is non-None.
native = cast(Native, self._native)
log_handler = setup_logging(
self._log_level,
native=native,
log_dir=self._log_dir,
log_filename=self.LOG_NAME,
warnings_filter_regexes=self._bootstrap_options.for_global_scope(), # type: ignore[union-attr]
)
# We know log_handler is never None because we did pass a non-None `log_dir`
# to setup_logging.
log_handler = cast(NativeHandler, log_handler)
native.override_thread_logging_destination_to_just_pantsd()
# Do a python-level redirect of stdout/stderr, which will not disturb `0,1,2`.
# TODO: Consider giving these pipes/actual fds, in order to make them "deep" replacements
# for `1,2`, and allow them to be used via `stdio_as`.
sys.stdout = _LoggerStream(logging.getLogger(), logging.INFO, log_handler) # type: ignore[assignment]
sys.stderr = _LoggerStream(logging.getLogger(), logging.WARN, log_handler) # type: ignore[assignment]
self._logger.debug("logging initialized")
yield log_handler.stream
def _setup_services(self, pants_services):
for service in pants_services.services:
self._logger.info(f"setting up service {service}")
service.setup(self._services)
@staticmethod
def _make_thread(service):
name = f"{service.__class__.__name__}Thread"
def target():
Native().override_thread_logging_destination_to_just_pantsd()
service.run()
t = threading.Thread(target=target, name=name)
t.daemon = True
return t
def _run_services(self, pants_services):
"""Service runner main loop."""
if not pants_services.services:
self._logger.critical("no services to run, bailing!")
return
service_thread_map = {
service: self._make_thread(service) for service in pants_services.services
}
# Start services.
for service, service_thread in service_thread_map.items():
self._logger.info(f"starting service {service}")
try:
service_thread.start()
except (RuntimeError, FSEventService.ServiceError):
self.shutdown(service_thread_map)
raise PantsDaemon.StartupFailure(
f"service {service} failed to start, shutting down!"
)
# Once all services are started, write our pid.
self.write_pid()
self.write_metadata_by_name(
"pantsd", self.FINGERPRINT_KEY, ensure_text(self.options_fingerprint)
)
# Monitor services.
while not self.is_killed:
for service, service_thread in service_thread_map.items():
if not service_thread.is_alive():
self.shutdown(service_thread_map)
raise PantsDaemon.RuntimeFailure(
f"service failure for {service}, shutting down!"
)
else:
# Avoid excessive CPU utilization.
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
def _write_named_sockets(self, socket_map):
"""Write multiple named sockets using a socket mapping."""
for socket_name, socket_info in socket_map.items():
self.write_named_socket(socket_name, socket_info)
def run_sync(self):
"""Synchronously run pantsd."""
os.environ.pop("PYTHONPATH")
# Switch log output to the daemon's log stream from here forward.
# Also, register an exiter using os._exit to ensure we only close stdio streams once.
self._close_stdio()
with self._pantsd_logging() as log_stream, ExceptionSink.exiter_as(
lambda _: Exiter(exiter=os._exit)
):
# We don't have any stdio streams to log to anymore, so we log to a file.
# We don't override the faulthandler destination because the stream we get will proxy things
# via the rust logging code, and faulthandler needs to be writing directly to a real file
# descriptor. When pantsd logging was originally initialised, we already set up faulthandler
# to log to the correct file descriptor, so don't override it.
#
# We can get tracebacks of the pantsd process by tailing the pantsd log and sending it
# SIGUSR2.
ExceptionSink.reset_interactive_output_stream(
log_stream, override_faulthandler_destination=False,
)
# Reset the log location and the backtrace preference from the global bootstrap options.
global_bootstrap_options = self._bootstrap_options.for_global_scope()
ExceptionSink.reset_should_print_backtrace_to_terminal(
global_bootstrap_options.print_exception_stacktrace
)
ExceptionSink.reset_log_location(global_bootstrap_options.pants_workdir)
self._native.set_panic_handler()
# Set the process name in ps output to 'pantsd' vs './pants compile src/etc:: -ldebug'.
set_process_title(f"pantsd [{self._build_root}]")
# Write service socket information to .pids.
self._write_named_sockets(self._services.port_map)
# Enter the main service runner loop.
self._setup_services(self._services)
self._run_services(self._services)
def post_fork_child(self):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
spawn_control_env = dict(
PANTS_ENTRYPOINT=f"{__name__}:launch",
# The daemon should run under the same sys.path as us; so we ensure
# this. NB: It will scrub PYTHONPATH once started to avoid infecting
# its own unrelated subprocesses.
PYTHONPATH=os.pathsep.join(sys.path),
)
exec_env = {**os.environ, **spawn_control_env}
# Pass all of sys.argv so that we can proxy arg flags e.g. `-ldebug`.
cmd = [sys.executable] + sys.argv
spawn_control_env_vars = " ".join(f"{k}={v}" for k, v in spawn_control_env.items())
cmd_line = " ".join(cmd)
self._logger.debug(f"cmd is: {spawn_control_env_vars} {cmd_line}")
# TODO: Improve error handling on launch failures.
os.spawnve(os.P_NOWAIT, sys.executable, cmd, env=exec_env)
def needs_launch(self):
"""Determines if pantsd needs to be launched.
N.B. This should always be called under care of the `lifecycle_lock`.
:returns: True if the daemon needs launching, False otherwise.
:rtype: bool
"""
new_fingerprint = self.options_fingerprint
self._logger.debug(
"pantsd: is_alive={self.is_alive()} new_fingerprint={new_fingerprint} current_fingerprint={self.fingerprint}"
)
return self.needs_restart(new_fingerprint)
def launch(self) -> "PantsDaemon.Handle":
"""Launches pantsd in a subprocess.
N.B. This should always be called under care of the `lifecycle_lock`.
:returns: A Handle for the pantsd instance.
"""
self.terminate(include_watchman=False)
self.watchman_launcher.maybe_launch()
self._logger.debug("launching pantsd")
self.daemon_spawn()
# Wait up to 60 seconds for pantsd to write its pidfile.
pantsd_pid = self.await_pid(60)
listening_port = self.read_named_socket("pailgun", int)
self._logger.debug(f"pantsd is running at pid {self.pid}, pailgun port is {listening_port}")
return self.Handle(pantsd_pid, listening_port, self._metadata_base_dir)
def terminate(self, include_watchman=True):
"""Terminates pantsd and watchman.
N.B. This should always be called under care of the `lifecycle_lock`.
"""
super().terminate()
if include_watchman:
self.watchman_launcher.terminate()
def needs_restart(self, option_fingerprint):
"""Overrides ProcessManager.needs_restart, to account for the case where pantsd is running
but we want to shutdown after this run.
:param option_fingerprint: A fingerprint of the global bootstrap options.
:return: True if the daemon needs to restart.
"""
should_shutdown_after_run = (
self._bootstrap_options.for_global_scope().shutdown_pantsd_after_run
)
return super().needs_restart(option_fingerprint) or (
self.is_alive() and should_shutdown_after_run
)
def launch():
"""An external entrypoint that spawns a new pantsd instance."""
PantsDaemon.Factory.create(OptionsBootstrapper.create()).run_sync()
|
socks5.py
|
#!/usr/bin/env python3
import argparse
import logging
import os
import platform
import signal
import struct
import sys
import threading
from socket import AF_INET, SOCK_STREAM, socket
from socketserver import BaseServer, StreamRequestHandler, ThreadingTCPServer
__author__ = 'Youchao Feng'
support_os = ('Darwin', 'Linux')
current_os = platform.system()
def byte_to_int(b):
"""
Convert Unsigned byte to int
:param b: byte value
:return: int value
"""
return b & 0xFF
def port_from_byte(b1, b2):
"""
:param b1: First byte of port
:param b2: Second byte of port
:return: Port in Int
"""
return byte_to_int(b1) << 8 | byte_to_int(b2)
def host_from_ip(a, b, c, d):
a = byte_to_int(a)
b = byte_to_int(b)
c = byte_to_int(c)
d = byte_to_int(d)
return "%d.%d.%d.%d" % (a, b, c, d)
def get_command_name(value):
"""
Gets command name by value
:param value: value of Command
:return: Command Name
"""
if value == 1:
return 'CONNECT'
elif value == 2:
return 'BIND'
elif value == 3:
return 'UDP_ASSOCIATE'
else:
return None
def build_command_response(reply):
start = b'\x05%s\x00\x01\x00\x00\x00\x00\x00\x00'
return start % reply.get_byte_string()
def close_session(session):
session.get_client_socket().close()
logging.info("Session[%s] closed", session.get_id())
def run_daemon_process(stdout='/dev/null',
stderr=None,
stdin='/dev/null',
pid_file=None,
start_msg='started with pid %s'):
"""
This forks the current process into a daemon.
The stdin, stdout, and stderr arguments are file names that
will be opened and be used to replace the standard file descriptors
in sys.stdin, sys.stdout, and sys.stderr.
These arguments are optional and default to /dev/null.
Note that stderr is opened unbuffered, so
if it shares a file with stdout then interleaved output
may not appear in the order that you expect.
"""
# flush io
sys.stdout.flush()
sys.stderr.flush()
# Do first fork.
try:
if os.fork() > 0:
sys.exit(0) # Exit first parent.
except OSError as e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment.
os.chdir("/")
os.umask(0)
os.setsid()
# Do second fork.
try:
if os.fork() > 0:
sys.exit(0) # Exit second parent.
except OSError as e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
# Open file descriptors and print start message
if not stderr:
stderr = stdout
si = open(stdin, 'r')
so = open(stdout, 'a+')
se = open(stderr, 'ba+', 0) # unbuffered
pid = str(os.getpid())
sys.stderr.write(start_msg % pid)
sys.stderr.flush()
if pid_file:
open(pid_file, 'w+').write("%s\n" % pid)
# Redirect standard file descriptors.
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
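# Illustrative call (a sketch; start_command() below invokes it the same way when
# daemonizing, and the pid file path used here is just an example):
#
#   run_daemon_process(pid_file='/tmp/pysocks.pid',
#                      start_msg='Start SOCKS5 server at pid %s\n')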
class Session:
index = 0
def __init__(self, client_socket):
Session.index += 1
self.__id = Session.index
self.__client_socket = client_socket
self._attr = {}
def get_id(self):
return self.__id
def set_attr(self, key, value):
self._attr[key] = value
def get_client_socket(self):
return self.__client_socket
class AddressType:
IPV4 = 1
DOMAIN_NAME = 3
IPV6 = 4
class SocksCommand:
CONNECT = 1
BIND = 2
UDP_ASSOCIATE = 3
class SocksMethod:
NO_AUTHENTICATION_REQUIRED = 0
GSS_API = 1
USERNAME_PASSWORD = 2
class ServerReply:
def __init__(self, value):
self.__value = value
def get_byte_string(self):
if self.__value == 0:
return b'\x00'
elif self.__value == 1:
return b'\x01'
elif self.__value == 2:
return b'\x02'
elif self.__value == 3:
return b'\x03'
elif self.__value == 4:
return b'\x04'
elif self.__value == 5:
return b'\x05'
elif self.__value == 6:
return b'\x06'
elif self.__value == 7:
return b'\x07'
elif self.__value == 8:
return b'\x08'
def get_value(self):
return self.__value
class ReplyType:
SUCCEEDED = ServerReply(0)
GENERAL_SOCKS_SERVER_FAILURE = ServerReply(1)
CONNECTION_NOT_ALLOWED_BY_RULESET = ServerReply(2)
NETWORK_UNREACHABLE = ServerReply(3)
HOST_UNREACHABLE = ServerReply(4)
CONNECTION_REFUSED = ServerReply(5)
TTL_EXPIRED = ServerReply(6)
COMMAND_NOT_SUPPORTED = ServerReply(7)
ADDRESS_TYPE_NOT_SUPPORTED = ServerReply(8)
class SocketPipe:
BUFFER_SIZE = 1024 * 1024
def __init__(self, socket1, socket2):
self._socket1 = socket1
self._socket2 = socket2
self.__running = False
self.t1 = threading.Thread(target=self.__transfer, args=(self._socket1, self._socket2))
self.t2 = threading.Thread(target=self.__transfer, args=(self._socket2, self._socket1))
def __transfer(self, socket1, socket2):
while self.__running:
try:
data = socket1.recv(self.BUFFER_SIZE)
if len(data) > 0:
socket2.sendall(data)
else:
break
except IOError:
self.stop()
self.stop()
def start(self):
self.__running = True
self.t1.start()
self.t2.start()
def stop(self):
self._socket1.close()
self._socket2.close()
self.__running = False
def is_running(self):
return self.__running
class CommandExecutor:
def __init__(self, remote_server_host, remote_server_port, session):
self.__proxy_socket = socket(AF_INET, SOCK_STREAM)
self.__remote_server_host = remote_server_host
self.__remote_server_port = remote_server_port
self.__client = session.get_client_socket()
self.__session = session
def do_connect(self):
"""
Do SOCKS CONNECT method
:return: None
"""
address = self.__get_address()
logging.info("Connect request to %s", address)
result = self.__proxy_socket.connect_ex(address)
if result == 0:
self.__client.send(build_command_response(ReplyType.SUCCEEDED))
socket_pipe = SocketPipe(self.__client, self.__proxy_socket)
socket_pipe.start()
while socket_pipe.is_running():
pass
elif result == 60:
self.__client.send(build_command_response(ReplyType.TTL_EXPIRED))
elif result == 61:
self.__client.send(build_command_response(ReplyType.NETWORK_UNREACHABLE))
else:
logging.error('Connection Error:[%s] is unknown', result)
self.__client.send(build_command_response(ReplyType.NETWORK_UNREACHABLE))
def do_bind(self):
pass
def do_udp_associate(self):
pass
def __get_address(self):
return self.__remote_server_host, self.__remote_server_port
class User:
def __init__(self, username, password):
self.__username = username
self.__password = password
def get_username(self):
return self.__username
def get_password(self):
return self.__password
def __repr__(self):
return '<user: username=%s, password=%s>' % (self.get_username(), self.__password)
class UserManager:
def __init__(self):
self.__users = {}
def add_user(self, user):
self.__users[user.get_username()] = user
def remove_user(self, username):
if username in self.__users:
del self.__users[username]
def check(self, username, password):
if username in self.__users and self.__users[username].get_password() == password:
return True
else:
return False
def get_user(self, username):
return self.__users[username]
def get_users(self):
return self.__users
class Socks5RequestHandler(StreamRequestHandler):
def __init__(self, request, client_address, server):
StreamRequestHandler.__init__(self, request, client_address, server)
def handle(self):
session = Session(self.connection)
logging.info('Create session[%s] for %s:%d', session.get_id(), self.client_address[0], self.client_address[1])
# print(self.server.allowed)
if self.server.allowed and self.client_address[0] not in self.server.allowed:
logging.info('Remote IP not in allowed list. Closing connection')
close_session(session)
return
client = self.connection
client.recv(1)
method_num, = struct.unpack('b', client.recv(1))
meth_bytes = client.recv(method_num)
methods = struct.unpack('b' * method_num, meth_bytes)
auth = self.server.is_auth()
if methods.__contains__(SocksMethod.NO_AUTHENTICATION_REQUIRED) and not auth:
client.send(b"\x05\x00")
elif methods.__contains__(SocksMethod.USERNAME_PASSWORD) and auth:
client.send(b"\x05\x02")
if not self.__do_username_password_auth():
logging.info('Session[%d] authentication failed', session.get_id())
close_session(session)
return
else:
logging.info('Client requested unknown method (%s, %s->%s). Cannot continue.', methods, method_num,
meth_bytes)
client.send(b"\x05\xFF")
return
version, command, reserved, address_type = struct.unpack('B' * 4, client.recv(4))
host = None
port = None
if address_type == AddressType.IPV4:
ip_a, ip_b, ip_c, ip_d, port = struct.unpack('!' + ('b' * 4) + 'H', client.recv(6))
host = host_from_ip(ip_a, ip_b, ip_c, ip_d)
elif address_type == AddressType.DOMAIN_NAME:
host_length, = struct.unpack('b', client.recv(1))
host = client.recv(host_length)
port, = struct.unpack('!H', client.recv(2))
elif address_type == AddressType.IPV6:
ip6_01, ip6_02, ip6_03, ip6_04, \
ip6_05, ip6_06, ip6_07, ip6_08, \
ip6_09, ip6_10, ip6_11, ip6_12, \
ip6_13, ip6_14, ip6_15, ip6_16, \
port = struct.unpack('!' + ('b' * 16) + 'H', client.recv(18))
logging.warn("Address type not implemented: %s (IPV6 Connect)", address_type)
logging.info("Params: %s, port: %s", (
ip6_01, ip6_02, ip6_03, ip6_04, ip6_05, ip6_06, ip6_07, ip6_08, ip6_09, ip6_10, ip6_11, ip6_12, ip6_13,
ip6_14, ip6_15, ip6_16), port)
client.send(build_command_response(ReplyType.ADDRESS_TYPE_NOT_SUPPORTED))
return
else: # address type not support
logging.warn("Address type not supported: %s", address_type)
client.send(build_command_response(ReplyType.ADDRESS_TYPE_NOT_SUPPORTED))
return
command_executor = CommandExecutor(host, port, session)
if command == SocksCommand.CONNECT:
logging.info("Session[%s] Request connect %s:%s", session.get_id(), host, port)
command_executor.do_connect()
close_session(session)
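# For reference, the request parsed above follows the RFC 1928 layout
# VER(1) CMD(1) RSV(1) ATYP(1) DST.ADDR(variable) DST.PORT(2); for example, a
# CONNECT to 93.184.216.34:80 over IPv4 arrives as the ten bytes
# b'\x05\x01\x00\x01\x5d\xb8\xd8\x22\x00\x50'.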
def __do_username_password_auth(self):
client = self.connection
client.recv(1)
length = byte_to_int(struct.unpack('b', client.recv(1))[0])
username = client.recv(length).decode('utf-8')
length = byte_to_int(struct.unpack('b', client.recv(1))[0])
password = client.recv(length).decode('utf-8')
user_manager = self.server.get_user_manager()
if user_manager.check(username, password):
client.send(b"\x01\x00")
return True
else:
client.send(b"\x01\x01")
return False
class Socks5Server(ThreadingTCPServer):
"""
SOCKS5 proxy server
"""
def __init__(self, port, auth=False, user_manager=UserManager(), allowed=None):
ThreadingTCPServer.__init__(self, ('', port), Socks5RequestHandler)
self.__port = port
self.__users = {}
self.__auth = auth
self.__user_manager = user_manager
self.__sessions = {}
self.allowed = allowed
self.th = threading.Thread(target=self.serve_forever)
def serve_forever(self, poll_interval=0.5):
logging.info("Create SOCKS5 server at port %d", self.__port)
ThreadingTCPServer.serve_forever(self, poll_interval)
def finish_request(self, request, client_address):
BaseServer.finish_request(self, request, client_address)
def is_auth(self):
return self.__auth
def set_auth(self, auth):
self.__auth = auth
def get_all_managed_session(self):
return self.__sessions
def get_bind_port(self):
return self.__port
def get_user_manager(self):
return self.__user_manager
def set_user_manager(self, user_manager):
self.__user_manager = user_manager
def run_in_thread(self):
self.th.start()
def stop_server_thread(self):
self.server_close()
self.shutdown()
self.th.join()
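# Illustrative use of the threaded server API above (a sketch; the port number and
# disabled authentication are assumptions):
#
#   server = Socks5Server(1080)
#   server.run_in_thread()       # serve_forever() on a background thread
#   ...                          # do other work while the proxy serves
#   server.stop_server_thread()  # close the socket, shut down, join the thread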
def check_os_support():
if current_os not in support_os:
print('Not supported on %s' % current_os)
sys.exit()
def stop(pid_file):
check_os_support()
print('Stopping server...', end=' ')
try:
f = open(pid_file, 'r')
pid = int(f.readline())
os.kill(pid, signal.SIGTERM)
os.remove(pid_file)
print(" [OK]")
except OSError:
print("pysocks is not running")
except IOError:
print("pysocks is not running")
def status(pid_file):
check_os_support()
try:
f = open(pid_file, 'r')
pid = int(f.readline())
print('pysocks(pid %d) is running...' % pid)
except IOError:
print("pysocks is stopped")
def start_command(args):
enable_log = True
log_file = args.logfile
auth = args.auth is not None
pid_file = args.pidfile
user_manager = UserManager()
should_daemonize = not args.foreground
if auth:
for user in args.auth:
user_pwd = user.split(':')
user_manager.add_user(User(user_pwd[0], user_pwd[1]))
if enable_log:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s - %(message)s',
filename=log_file,
filemode='a')
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)-5s %(lineno)-3d - %(message)s')
console.setFormatter(formatter)
logging.getLogger().addHandler(console)
Socks5Server.allow_reuse_address = True
socks5_server = Socks5Server(args.port, auth, user_manager, allowed=args.allow_ip)
try:
if current_os in support_os and should_daemonize:
run_daemon_process(pid_file=pid_file, start_msg='Start SOCKS5 server at pid %s\n')
socks5_server.serve_forever()
except KeyboardInterrupt:
socks5_server.server_close()
socks5_server.shutdown()
logging.info("SOCKS5 server shutdown")
def stop_command(args):
pid_file = args.pidfile
stop(pid_file)
sys.exit()
def status_command(args):
pid_file = args.pidfile
status(pid_file)
sys.exit()
def main():
default_pid_file = os.path.join(os.path.expanduser('~'), '.pysocks.pid')
default_log_file = os.path.join(os.path.expanduser('~'), 'pysocks.log')
parser = argparse.ArgumentParser(description='start a simple socks5 server',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
subparsers = parser.add_subparsers(help='sub-command help')
parser_start = subparsers.add_parser('start', help='start a SOCKS5 server', description='start a SOCKS5 server',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_start.add_argument('-p', '--port', type=int, help='specify server port, default 1080', default=1080)
parser_start.add_argument('-f', '--foreground', help='stay in foreground (prevents daemonization)',
action='store_true', default=False)
parser_start.add_argument('-i', '--allow-ip', nargs='+', help='allowed client IP list')
parser_start.add_argument('-a', '--auth', nargs='+', help='allowed users')
parser_start.add_argument('-L', '--logfile', help='log file', default=default_log_file)
parser_start.add_argument('-P', '--pidfile', help='pid file', default=default_pid_file)
parser_start.set_defaults(func=start_command)
parser_stop = subparsers.add_parser('stop', help='stop a SOCKS5 server', description='stop a SOCKS5 server',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_stop.add_argument('-P', '--pidfile', help='pid file', default=default_pid_file)
parser_stop.set_defaults(func=stop_command)
parser_status = subparsers.add_parser('status', help='print SOCKS5 server status',
description='print SOCKS5 server status',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_status.add_argument('-P', '--pidfile', help='pid file', default=default_pid_file)
parser_status.set_defaults(func=status_command)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
|
test.py
|
# from aqt import mw
# from aqt import gui_hooks
# from aqt import utils
import threading
import socket
from PyQt5.QtCore import QObject, pyqtSignal, QThread
import tcp_pack_construct
# Signal object
class QTypeSignal(QObject):
# Create a signal
sendmsg = pyqtSignal(object)
def __init__(self):
super().__init__()
def emit_connect(self):
# Emit the signal
self.sendmsg.emit('emit_connect')
global_send = QTypeSignal()
global_on = True
tcp_server_socket = None
def thread_recv_msg(new_client_socket: socket.socket, ip_port, send):
pack_constructor = tcp_pack_construct.TcpPackConstructor()
def pack_handle(pack_data: list):
pack_data = bytearray(pack_data)
print("recv:", pack_data.decode(encoding='UTF-8', errors='strict'))
pack_constructor.set_detahandle_callback(pack_handle)
while global_on:
recv_data = new_client_socket.recv(1024)
# Check whether any data was returned
if recv_data:
pack_constructor.handle_slice(list(recv_data))
# recv_text = recv_data.decode('UTF-8')
# print("Message from [%s]: %s" % (str(ip_port), recv_text))
else:
# This branch runs when the peer disconnects; close the socket
new_client_socket.close()
# print("Connection to [%s] has been closed" % (str(ip_port)))
break
class NetCtx:
send: QTypeSignal
netctx = NetCtx()
def thread_server(send: QTypeSignal):
# global_send.emit_connect()
global tcp_server_socket
tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow address reuse
# tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
# Bind the TCP port
tcp_server_socket.bind(("", 12357))
# Listen with a backlog of up to 128 connections
tcp_server_socket.listen(128)
while global_on:
try:
new_client_socket, ip_port = tcp_server_socket.accept()
# utils.showInfo("The panote client has connected to the anki add-on")
send.emit_connect()
new_thread = threading.Thread(target=thread_recv_msg, args=(new_client_socket, ip_port, send))
# Daemon thread: child threads exit when the main thread exits
new_thread.setDaemon(True)
new_thread.start()
except:
print('err')
def slot_handle(msg):
utils.showInfo(msg + " the panote client has connected to the anki add-on")
# Add-on lifecycle
def init():
print('Binding the signal to the slot:')
# Bind the signal to the slot function
global_send.sendmsg.connect(slot_handle)
thread = threading.Thread(target=thread_server, args=(global_send,))
thread.start()
def about_2_quit():
global tcp_server_socket, global_on
global_on = False
tcp_server_socket.close()
# utils.showInfo("about_2_quit")
init()
|
vc.py
|
# -*- coding: utf-8 -*-
"""Prompt formatter for simple version control branches"""
# pylint:disable=no-member, invalid-name
import os
import sys
import queue
import builtins
import threading
import subprocess
import re
import pathlib
import xonsh.tools as xt
from xonsh.lazyasd import LazyObject
RE_REMOVE_ANSI = LazyObject(
lambda: re.compile(r"(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]"),
globals(),
"RE_REMOVE_ANSI",
)
def _get_git_branch(q):
denv = builtins.__xonsh__.env.detype()
try:
branches = xt.decode_bytes(
subprocess.check_output(
["git", "branch"], env=denv, stderr=subprocess.DEVNULL
)
).splitlines()
except (subprocess.CalledProcessError, OSError, FileNotFoundError):
q.put(None)
else:
for branch in branches:
if not branch.startswith("* "):
continue
elif branch.endswith(")"):
branch = branch.split()[-1][:-1]
else:
branch = branch.split()[-1]
q.put(branch)
break
else:
q.put(None)
def get_git_branch():
"""Attempts to find the current git branch. If this could not
be determined (timeout, not in a git repo, etc.) then this returns None.
"""
branch = None
timeout = builtins.__xonsh__.env.get("VC_BRANCH_TIMEOUT")
q = queue.Queue()
t = threading.Thread(target=_get_git_branch, args=(q,))
t.start()
t.join(timeout=timeout)
try:
branch = q.get_nowait()
# branch = RE_REMOVE_ANSI.sub("", branch or "")
if branch:
branch = RE_REMOVE_ANSI.sub("", branch)
except queue.Empty:
branch = None
return branch
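def _run_with_timeout(worker, timeout):
    """Sketch of the thread-plus-queue timeout pattern used by get_git_branch
    above and get_hg_branch below, written generically for reference only; this
    helper is not part of xonsh, and `worker` is any callable that puts its
    result on the queue it receives."""
    q = queue.Queue()
    t = threading.Thread(target=worker, args=(q,))
    t.start()
    t.join(timeout=timeout)
    try:
        return q.get_nowait()
    except queue.Empty:
        return None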
def _get_hg_root(q):
_curpwd = builtins.__xonsh__.env["PWD"]
while True:
if not os.path.isdir(_curpwd):
return False
try:
dot_hg_is_in_curwd = any([b.name == ".hg" for b in xt.scandir(_curpwd)])
except OSError:
return False
if dot_hg_is_in_curwd:
q.put(_curpwd)
break
else:
_oldpwd = _curpwd
_curpwd = os.path.split(_curpwd)[0]
if _oldpwd == _curpwd:
return False
def get_hg_branch(root=None):
"""Try to get the mercurial branch of the current directory,
return None if not in a repo or subprocess.TimeoutExpired if timed out.
"""
env = builtins.__xonsh__.env
timeout = env["VC_BRANCH_TIMEOUT"]
q = queue.Queue()
t = threading.Thread(target=_get_hg_root, args=(q,))
t.start()
t.join(timeout=timeout)
try:
root = pathlib.Path(q.get_nowait())
except queue.Empty:
return None
if env.get("VC_HG_SHOW_BRANCH"):
# get branch name
branch_path = root / ".hg" / "branch"
if branch_path.exists():
with open(branch_path, "r") as branch_file:
branch = branch_file.read().strip()
else:
branch = "default"
else:
branch = ""
# add activated bookmark and topic
for filename in ["bookmarks.current", "topic"]:
feature_branch_path = root / ".hg" / filename
if feature_branch_path.exists():
with open(feature_branch_path) as file:
feature_branch = file.read().strip()
if feature_branch:
if branch:
if filename == "topic":
branch = f"{branch}/{feature_branch}"
else:
branch = f"{branch}, {feature_branch}"
else:
branch = feature_branch
return branch
_FIRST_BRANCH_TIMEOUT = True
def _first_branch_timeout_message():
global _FIRST_BRANCH_TIMEOUT
sbtm = builtins.__xonsh__.env["SUPPRESS_BRANCH_TIMEOUT_MESSAGE"]
if not _FIRST_BRANCH_TIMEOUT or sbtm:
return
_FIRST_BRANCH_TIMEOUT = False
print(
"xonsh: branch timeout: computing the branch name, color, or both "
"timed out while formatting the prompt. You may avoid this by "
"increasing the value of $VC_BRANCH_TIMEOUT or by removing branch "
"fields, like {curr_branch}, from your $PROMPT. See the FAQ "
"for more details. This message will be suppressed for the remainder "
"of this session. To suppress this message permanently, set "
"$SUPPRESS_BRANCH_TIMEOUT_MESSAGE = True in your xonshrc file.",
file=sys.stderr,
)
def current_branch():
"""Gets the branch for a current working directory. Returns an empty string
if the cwd is not a repository. This currently only works for git and hg
and should be extended in the future. If a timeout occurred, the string
'<branch-timeout>' is returned.
"""
branch = None
cmds = builtins.__xonsh__.commands_cache
# check for binary only once
if cmds.is_empty():
has_git = bool(cmds.locate_binary("git", ignore_alias=True))
has_hg = bool(cmds.locate_binary("hg", ignore_alias=True))
else:
has_git = bool(cmds.lazy_locate_binary("git", ignore_alias=True))
has_hg = bool(cmds.lazy_locate_binary("hg", ignore_alias=True))
if has_git:
branch = get_git_branch()
if not branch and has_hg:
branch = get_hg_branch()
if isinstance(branch, subprocess.TimeoutExpired):
branch = "<branch-timeout>"
_first_branch_timeout_message()
return branch or None
def _git_dirty_working_directory(q, include_untracked):
status = None
denv = builtins.__xonsh__.env.detype()
try:
cmd = ["git", "status", "--porcelain"]
if include_untracked:
cmd.append("--untracked-files=normal")
else:
cmd.append("--untracked-files=no")
status = subprocess.check_output(cmd, stderr=subprocess.DEVNULL, env=denv)
except (subprocess.CalledProcessError, OSError, FileNotFoundError):
q.put(None)
if status is not None:
return q.put(bool(status))
def git_dirty_working_directory(include_untracked=False):
"""Returns whether or not the git directory is dirty. If this could not
be determined (timeout, file not found, etc.) then this returns None.
"""
timeout = builtins.__xonsh__.env.get("VC_BRANCH_TIMEOUT")
q = queue.Queue()
t = threading.Thread(
target=_git_dirty_working_directory, args=(q, include_untracked)
)
t.start()
t.join(timeout=timeout)
try:
return q.get_nowait()
except queue.Empty:
return None
def hg_dirty_working_directory():
"""Computes whether or not the mercurial working directory is dirty or not.
If this cannot be determined, None is returned.
"""
env = builtins.__xonsh__.env
cwd = env["PWD"]
denv = env.detype()
vcbt = env["VC_BRANCH_TIMEOUT"]
# Override user configurations settings and aliases
denv["HGRCPATH"] = ""
try:
s = subprocess.check_output(
["hg", "identify", "--id"],
stderr=subprocess.PIPE,
cwd=cwd,
timeout=vcbt,
universal_newlines=True,
env=denv,
)
return s.strip(os.linesep).endswith("+")
except (
subprocess.CalledProcessError,
subprocess.TimeoutExpired,
FileNotFoundError,
):
return None
def dirty_working_directory():
"""Returns a boolean as to whether there are uncommitted files in version
control repository we are inside. If this cannot be determined, returns
None. Currently supports git and hg.
"""
dwd = None
cmds = builtins.__xonsh__.commands_cache
if cmds.lazy_locate_binary("git", ignore_alias=True):
dwd = git_dirty_working_directory()
if cmds.lazy_locate_binary("hg", ignore_alias=True) and dwd is None:
dwd = hg_dirty_working_directory()
return dwd
def branch_color():
"""Return red if the current branch is dirty, yellow if the dirtiness can
not be determined, and green if it clean. These are bold, intense colors
for the foreground.
"""
dwd = dirty_working_directory()
if dwd is None:
color = "{BOLD_INTENSE_YELLOW}"
elif dwd:
color = "{BOLD_INTENSE_RED}"
else:
color = "{BOLD_INTENSE_GREEN}"
return color
def branch_bg_color():
"""Return red if the current branch is dirty, yellow if the dirtiness can
not be determined, and green if it clean. These are background colors.
"""
dwd = dirty_working_directory()
if dwd is None:
color = "{BACKGROUND_YELLOW}"
elif dwd:
color = "{BACKGROUND_RED}"
else:
color = "{BACKGROUND_GREEN}"
return color
|
main.py
|
import tensorflow as tf
import cv2
import time
import os
import sys
from rtmp import server
import av
import queue
import threading
from recognition import detection, facenet, utils
from recognition import recognition
def mosic_and_show_method(share_que: queue.Queue, width, height):
r = server.RTMPSendServer()
r.start(width,height, 8253)
while True:
itemlist = share_que.get()
if len(itemlist) == 1:
r.write(itemlist[0])
continue
img = itemlist[0]
det = itemlist[1]
match_names = ['Unknown' if name == 'temp' else name for name in itemlist[2]]
frame = itemlist[3]
img = utils.mosaic(img, det, match_names, 6)
img = utils.draw_box(img, det, match_names, p)
cv2.putText(img, 'FPS : {0:0.3f}'.format(1 / (toc / 2)), (img.shape[0] + 300, 30), cv2.FONT_HERSHEY_COMPLEX,
1, (255, 255, 255), thickness=2, lineType=2)
cv2.imshow('frame', img)
newframe = av.VideoFrame.from_ndarray(img, format="bgr24")
newframe.pts = frame.pts
newframe.time_base = frame.time_base
r.write(newframe)
ch = cv2.waitKey(1)
if ch == ord('q'):
break
if ch == ord('s'):
cv2.waitKey(0)
if __name__ == "__main__":
print(os.getcwd())
print(sys.version)
model = '20180402-114759'
print("Create Session")
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
recognition_threshold = 0.85
conf_threshold = 0.7
resize_rate = 0.5
print("Load Network")
detection = detection.Detection(sess=sess, resize_rate=resize_rate, conf_threshold=conf_threshold)
recognition = recognition.Recognition(sess=sess, recognition_threshold=recognition_threshold,
resize_rate=resize_rate, model_name=model, classifier_name="test_3")
bounding_boxes = match_names = p = []
print("Initializing Server")
s = server.Server()
s.start(1935)
share_que = queue.Queue()
t = threading.Thread(target=mosic_and_show_method, args=(share_que,720,1080))
t.daemon = True
t.start()
print("Start Reading...")
while True:
frame = s.read()
if not isinstance(frame, av.VideoFrame):
share_que.put([frame])
continue
# if frame.pts < time.perf_counter()*1000: # drop the frame if its pts lags too far behind the current time
# continue
if s.getFrameQueSize() > 1:
continue
img = frame.to_ndarray(format="bgr24")
tic = time.time()
resize_img = cv2.resize(img, (0, 0), fx=resize_rate, fy=resize_rate)
if resize_img.ndim == 2:
resize_img = facenet.to_rgb(resize_img)
resize_img = resize_img[:, :, 0:3]
bounding_boxes = detection.detect_faces(resize_img, img.shape)
if bounding_boxes.shape[0] > 0:
match_names, p = recognition.recognize_faces(img, bounding_boxes)
else:
bounding_boxes = match_names = p = []
toc = time.time() - tic
cur = time.perf_counter()*1000
sys.stdout.write('\rdetect time : %.2f \tframe pts : %d \tcur time : %.2f\tcur-pts : %.2f'%(toc / 2*1000,frame.pts,cur,cur-frame.pts))
share_que.put([img, bounding_boxes, match_names, frame]) # once queued, the mosaic thread picks the item up and draws the boxes and mosaic
|
zdrun.py
|
#!python
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""zrdun -- run an application as a daemon.
Usage: python zrdun.py [zrdun-options] program [program-arguments]
"""
from stat import ST_MODE
import errno
import fcntl
import logging
import os
import select
import signal
import socket
import sys
import subprocess
import threading
import time
if __name__ == "__main__":
# Add the parent of the script directory to the module search path
# (but only when the script is run from inside the zdaemon package)
from os.path import dirname, basename, abspath, normpath
scriptdir = dirname(normpath(abspath(sys.argv[0])))
if basename(scriptdir).lower() == "zdaemon":
sys.path.append(dirname(scriptdir))
here = os.path.dirname(os.path.realpath(__file__))
swhome = os.path.dirname(here)
for parts in [("src",), ("lib", "python"), ("Lib", "site-packages")]:
d = os.path.join(swhome, *(parts + ("zdaemon",)))
if os.path.isdir(d):
d = os.path.join(swhome, *parts)
sys.path.insert(0, d)
break
from zdaemon.zdoptions import RunnerOptions
from ZConfig.components.logger.loghandler import reopenFiles
def string_list(arg):
return arg.split()
class ZDRunOptions(RunnerOptions):
__doc__ = __doc__
positional_args_allowed = 1
logsectionname = "runner.eventlog"
program = None
def __init__(self):
RunnerOptions.__init__(self)
self.add("schemafile", short="S:", long="schema=",
default="schema.xml",
handler=self.set_schemafile)
self.add("stoptimeut", "runner.stop_timeout")
self.add("starttestprogram", "runner.start_test_program")
def set_schemafile(self, file):
self.schemafile = file
def realize(self, *args, **kwds):
RunnerOptions.realize(self, *args, **kwds)
if self.args:
self.program = self.args
if not self.program:
self.usage("no program specified (use -C or positional args)")
if self.sockname:
# Convert socket name to absolute path
self.sockname = os.path.abspath(self.sockname)
if self.config_logger is None:
# This doesn't perform any configuration of the logging
# package, but that's reasonable in this case.
self.logger = logging.getLogger()
else:
self.logger = self.config_logger()
def load_logconf(self, sectname):
"""Load alternate eventlog if the specified section isn't present."""
RunnerOptions.load_logconf(self, sectname)
if self.config_logger is None and sectname != "eventlog":
RunnerOptions.load_logconf(self, "eventlog")
class Subprocess:
"""A class to manage a subprocess."""
# Initial state; overridden by instance variables
pid = 0 # Subprocess pid; 0 when not running
lasttime = 0 # Last time the subprocess was started; 0 if never
def __init__(self, options, args=None):
"""Constructor.
Arguments are a ZDRunOptions instance and a list of program
arguments; the latter's first item must be the program name.
"""
if args is None:
args = options.args
if not args:
options.usage("missing 'program' argument")
self.options = options
self.args = args
self.testing = set()
self._set_filename(args[0])
def _set_filename(self, program):
"""Internal: turn a program name into a file name, using $PATH."""
if "/" in program:
filename = program
try:
st = os.stat(filename)
except os.error:
self.options.usage("can't stat program %r" % program)
else:
path = get_path()
for dir in path:
filename = os.path.join(dir, program)
try:
st = os.stat(filename)
except os.error:
continue
mode = st[ST_MODE]
if mode & 0o111:
break
else:
self.options.usage("can't find program %r on PATH %s" %
(program, path))
if not os.access(filename, os.X_OK):
self.options.usage("no permission to run program %r" % filename)
self.filename = filename
def test(self, pid):
starttestprogram = self.options.starttestprogram
try:
while self.pid == pid:
if not subprocess.call(starttestprogram):
break
time.sleep(1)
finally:
self.testing.remove(pid)
def spawn(self):
"""Start the subprocess. It must not be running already.
Return the process id. If the fork() call fails, return 0.
"""
assert not self.pid
self.lasttime = time.time()
try:
pid = os.fork()
except os.error:
return 0
if pid != 0:
# Parent
self.pid = pid
if self.options.starttestprogram:
self.testing.add(pid)
thread = threading.Thread(target=self.test, args=(pid,))
thread.setDaemon(True)
thread.start()
self.options.logger.info("spawned process pid=%d" % pid)
return pid
else: # pragma: nocover
# Child
try:
# Close file descriptors except std{in,out,err}.
# XXX We don't know how many to close; hope 100 is plenty.
for i in range(3, 100):
try:
os.close(i)
except os.error:
pass
try:
os.execv(self.filename, self.args)
except os.error as err:
sys.stderr.write("can't exec %r: %s\n" %
(self.filename, err))
sys.stderr.flush() # just in case
finally:
os._exit(127)
# Does not return
def kill(self, sig):
"""Send a signal to the subprocess. This may or may not kill it.
Return None if the signal was sent, or an error message string
if an error occurred or if the subprocess is not running.
"""
if not self.pid:
return "no subprocess running"
try:
os.kill(self.pid, sig)
except os.error as msg:
return str(msg)
return None
def setstatus(self, sts):
"""Set process status returned by wait() or waitpid().
This simply notes the fact that the subprocess is no longer
running by setting self.pid to 0.
"""
self.pid = 0
class Daemonizer:
def main(self, args=None):
self.options = ZDRunOptions()
self.options.realize(args)
self.logger = self.options.logger
self.run()
def run(self):
self.proc = Subprocess(self.options)
self.opensocket()
try:
self.setsignals()
if self.options.daemon:
self.daemonize()
self.runforever()
finally:
try:
os.unlink(self.options.sockname)
except os.error:
pass
mastersocket = None
commandsocket = None
def opensocket(self):
sockname = self.options.sockname
tempname = "%s.%d" % (sockname, os.getpid())
self.unlink_quietly(tempname)
while True:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
sock.bind(tempname)
os.chmod(tempname, 0o700)
try:
os.link(tempname, sockname)
break
except os.error:
# Lock contention, or stale socket.
self.checkopen()
# Stale socket -- delete, sleep, and try again.
msg = "Unlinking stale socket %s; sleep 1" % sockname
sys.stderr.write(msg + "\n")
sys.stderr.flush() # just in case
self.logger.warn(msg)
self.unlink_quietly(sockname)
sock.close()
time.sleep(1)
continue
finally:
self.unlink_quietly(tempname)
sock.listen(1)
sock.setblocking(0)
try: # PEP 446, Python >= 3.4
sock.set_inheritable(True)
except AttributeError:
pass
self.mastersocket = sock
def unlink_quietly(self, filename):
try:
os.unlink(filename)
except os.error:
pass
def checkopen(self):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.connect(self.options.sockname)
s.send(b"status\n")
data = s.recv(1000).decode()
s.close()
except socket.error:
pass
else:
data = data.rstrip("\n")
msg = ("Another zrdun is already up using socket %r:\n%s" %
(self.options.sockname, data))
sys.stderr.write(msg + "\n")
sys.stderr.flush() # just in case
self.logger.critical(msg)
sys.exit(1)
def setsignals(self):
signal.signal(signal.SIGTERM, self.sigexit)
signal.signal(signal.SIGHUP, self.sigexit)
signal.signal(signal.SIGINT, self.sigexit)
signal.signal(signal.SIGCHLD, self.sigchild)
def sigexit(self, sig, frame):
self.logger.critical("daemon manager killed by %s" % signame(sig))
sys.exit(1)
waitstatus = None
def sigchild(self, sig, frame):
try:
pid, sts = os.waitpid(-1, os.WNOHANG)
except os.error:
return
if pid:
self.waitstatus = pid, sts
transcript = None
def daemonize(self):
# To daemonize, we need to become the leader of our own session
# (process) group. If we do not, signals sent to our
# parent process will also be sent to us. This might be bad because
# signals such as SIGINT can be sent to our parent process during
# normal (uninteresting) operations such as when we press Ctrl-C in the
# parent terminal window to escape from a logtail command.
# To disassociate ourselves from our parent's session group we use
# os.setsid. It means "set session id", which has the effect of
# disassociating a process from its current session and process group
# and setting itself up as a new session leader.
#
# Unfortunately we cannot call setsid if we're already a session group
# leader, so we use "fork" to make a copy of ourselves that is
# guaranteed to not be a session group leader.
#
# We also change directories, set stderr and stdout to null, and
# change our umask.
#
# This explanation was (gratefully) garnered from
# http://www.hawklord.uklinux.net/system/daemons/d3.htm
pid = os.fork()
if pid != 0: # pragma: nocover
# Parent
self.logger.debug("daemon manager forked; parent exiting")
os._exit(0)
# Child
self.logger.info("daemonizing the process")
if self.options.directory:
try:
os.chdir(self.options.directory)
except os.error as err:
self.logger.warn("can't chdir into %r: %s"
% (self.options.directory, err))
else:
self.logger.info("set current directory: %r"
% self.options.directory)
os.close(0)
sys.stdin = sys.__stdin__ = open("/dev/null")
self.transcript = Transcript(self.options.transcript)
os.setsid()
os.umask(self.options.umask)
# XXX Stevens, in his Advanced Unix book, section 13.3 (page
# 417) recommends calling umask(0) and closing unused
# file descriptors. In his Network Programming book, he
# additionally recommends ignoring SIGHUP and forking again
# after the setsid() call, for obscure SVR4 reasons.
should_be_up = True
delay = 0 # If nonzero, delay starting or killing until this time
killing = 0 # If true, send SIGKILL when delay expires
proc = None # Subprocess instance
def runforever(self):
sig_r, sig_w = os.pipe()
fcntl.fcntl(
sig_r, fcntl.F_SETFL, fcntl.fcntl(
sig_r, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(
sig_w, fcntl.F_SETFL, fcntl.fcntl(
sig_w, fcntl.F_GETFL) | os.O_NONBLOCK)
signal.set_wakeup_fd(sig_w)
self.logger.info("daemon manager started")
while self.should_be_up or self.proc.pid:
if self.should_be_up and not self.proc.pid and not self.delay:
pid = self.proc.spawn()
if not pid:
# Can't fork. Try again later...
self.delay = time.time() + self.options.backofflimit
if self.waitstatus:
self.reportstatus()
r, w, x = [self.mastersocket, sig_r], [], []
if self.commandsocket:
r.append(self.commandsocket)
timeout = self.options.backofflimit
if self.delay:
timeout = max(0, min(timeout, self.delay - time.time()))
if timeout <= 0:
self.delay = 0
if self.killing and self.proc.pid:
self.proc.kill(signal.SIGKILL)
self.delay = time.time() + self.options.backofflimit
try:
r, w, x = select.select(r, w, x, timeout)
except select.error as err:
if err.args[0] != errno.EINTR:
raise
r = w = x = []
if self.waitstatus:
self.reportstatus()
if self.commandsocket and self.commandsocket in r:
try:
self.dorecv()
except socket.error as msg:
self.logger.exception("socket.error in dorecv(): %s"
% str(msg))
self.commandsocket = None
if self.mastersocket in r:
try:
self.doaccept()
except socket.error as msg:
self.logger.exception("socket.error in doaccept(): %s"
% str(msg))
self.commandsocket = None
if sig_r in r:
os.read(sig_r, 1) # don't let the buffer fill up
self.logger.info("Exiting")
sys.exit(0)
def reportstatus(self):
pid, sts = self.waitstatus
self.waitstatus = None
es, msg = decode_wait_status(sts)
msg = "pid %d: " % pid + msg
if pid != self.proc.pid:
msg = "unknown " + msg
self.logger.warn(msg)
else:
killing = self.killing
if killing:
self.killing = 0
self.delay = 0
else:
self.governor()
self.proc.setstatus(sts)
if es in self.options.exitcodes and not killing:
msg = msg + "; exiting now"
self.logger.info(msg)
sys.exit(es)
self.logger.info(msg)
backoff = 0
def governor(self):
# Back off if respawning too frequently
now = time.time()
if not self.proc.lasttime:
pass
elif now - self.proc.lasttime < self.options.backofflimit:
# Exited rather quickly; slow down the restarts
self.backoff += 1
if self.backoff >= self.options.backofflimit:
if self.options.forever:
self.backoff = self.options.backofflimit
else:
self.logger.critical("restarting too frequently; quit")
sys.exit(1)
self.logger.info("sleep %s to avoid rapid restarts" % self.backoff)
self.delay = now + self.backoff
else:
# Reset the backoff timer
self.backoff = 0
self.delay = 0
def doaccept(self):
if self.commandsocket:
# Give up on previous command socket!
self.sendreply("Command superseded by new command")
self.commandsocket.close()
self.commandsocket = None
self.commandsocket, addr = self.mastersocket.accept()
try: # PEP 446, Python >= 3.4
self.commandsocket.set_inheritable(True)
except AttributeError:
pass
self.commandbuffer = b""
def dorecv(self):
data = self.commandsocket.recv(1000)
if not data:
self.sendreply("Command not terminated by newline")
self.commandsocket.close()
self.commandsocket = None
self.commandbuffer += data
if b"\n" in self.commandbuffer:
self.docommand()
self.commandsocket.close()
self.commandsocket = None
elif len(self.commandbuffer) > 10000:
self.sendreply("Command exceeds 10 KB")
self.commandsocket.close()
self.commandsocket = None
def docommand(self):
lines = self.commandbuffer.split(b"\n")
args = lines[0].split()
if not args:
self.sendreply("Empty command")
return
command = args[0].decode()
methodname = "cmd_" + command
method = getattr(self, methodname, None)
if method:
method([a.decode() for a in args])
else:
self.sendreply("Unknown command %r; 'help' for a list" % command)
def cmd_start(self, args):
self.should_be_up = True
self.backoff = 0
self.delay = 0
self.killing = 0
if not self.proc.pid:
self.proc.spawn()
self.sendreply("Application started")
else:
self.sendreply("Application already started")
def cmd_stop(self, args):
self.should_be_up = False
self.backoff = 0
self.delay = 0
self.killing = 0
if self.proc.pid:
self.proc.kill(signal.SIGTERM)
self.sendreply("Sent SIGTERM")
self.killing = 1
if self.options.stoptimeut:
self.delay = time.time() + self.options.stoptimeut
else:
self.sendreply("Application already stopped")
def cmd_restart(self, args):
self.should_be_up = True
self.backoff = 0
self.delay = 0
self.killing = 0
if self.proc.pid:
self.proc.kill(signal.SIGTERM)
self.sendreply("Sent SIGTERM; will restart later")
self.killing = 1
if self.options.stoptimeut:
self.delay = time.time() + self.options.stoptimeut
else:
self.proc.spawn()
self.sendreply("Application started")
def cmd_kill(self, args):
if args[1:]:
try:
sig = int(args[1])
except BaseException:
self.sendreply("Bad signal %r" % args[1])
return
else:
sig = signal.SIGTERM
if not self.proc.pid:
self.sendreply("Application not running")
else:
msg = self.proc.kill(sig)
if msg:
self.sendreply("Kill %d failed: %s" % (sig, msg))
else:
self.sendreply("Signal %d sent" % sig)
def cmd_status(self, args):
if not self.proc.pid:
status = "stopped"
else:
status = "running"
self.sendreply("status=%s\n" % status +
"now=%r\n" % time.time() +
"should_be_up=%d\n" % self.should_be_up +
"delay=%r\n" % self.delay +
"backoff=%r\n" % self.backoff +
"lasttime=%r\n" % self.proc.lasttime +
"application=%r\n" % self.proc.pid +
"testing=%d\n" % bool(self.proc.testing) +
"manager=%r\n" % os.getpid() +
"backofflimit=%r\n" % self.options.backofflimit +
"filename=%r\n" % self.proc.filename +
"args=%r\n" % self.proc.args)
def cmd_reopen_transcript(self, args):
reopenFiles()
if self.transcript is not None:
self.transcript.reopen()
def sendreply(self, msg):
try:
if not msg.endswith("\n"):
msg = msg + "\n"
msg = msg.encode()
if hasattr(self.commandsocket, "sendall"):
self.commandsocket.sendall(msg)
else: # pragma: nocover
# This is quadratic, but msg is rarely more than 100 bytes :-)
while msg:
sent = self.commandsocket.send(msg)
msg = msg[sent:]
except socket.error as msg:
self.logger.warn("Error sending reply: %s" % str(msg))
class Transcript:
def __init__(self, filename):
self.read_from, w = os.pipe()
os.dup2(w, 1)
sys.stdout = sys.__stdout__ = os.fdopen(1, "w", 1)
os.dup2(w, 2)
sys.stderr = sys.__stderr__ = os.fdopen(2, "w", 1)
self.filename = filename
self.file = open(filename, 'ab', 0)
self.write = self.file.write
self.lock = threading.Lock()
thread = threading.Thread(target=self.copy)
thread.daemon = True
thread.start()
def copy(self):
try:
lock = self.lock
i = [self.read_from]
o = e = []
while True:
ii, oo, ee = select.select(i, o, e)
with lock:
for fd in ii:
self.write(os.read(fd, 8192))
finally:
# since there's no reader from this pipe we want the other side to
# get a SIGPIPE as soon as it tries to write to it, instead of
# deadlocking when the pipe buffer becomes full.
os.close(self.read_from)
def reopen(self):
new_file = open(self.filename, 'ab', 0)
with self.lock:
self.file.close()
self.file = new_file
self.write = self.file.write
# Helpers for dealing with signals and exit status
def decode_wait_status(sts):
"""Decode the status returned by wait() or waitpid().
Return a tuple (exitstatus, message) where exitstatus is the exit
status, or -1 if the process was killed by a signal; and message
is a message telling what happened. It is the caller's
responsibility to display the message.
"""
if os.WIFEXITED(sts):
es = os.WEXITSTATUS(sts) & 0xffff
msg = "exit status %s" % es
return es, msg
elif os.WIFSIGNALED(sts):
sig = os.WTERMSIG(sts)
msg = "terminated by %s" % signame(sig)
if hasattr(os, "WCOREDUMP"):
iscore = os.WCOREDUMP(sts)
else:
iscore = sts & 0x80
if iscore:
msg += " (core dumped)"
return -1, msg
else:
msg = "unknown termination cause 0x%04x" % sts
return -1, msg
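# Illustrative sketch (not part of the original zdrun code): on POSIX systems the
# wait status packs the exit code into the high byte and the terminating signal
# into the low byte, so, assuming a Linux-style encoding:
#
#   decode_wait_status(3 << 8)                 # -> (3, "exit status 3")
#   decode_wait_status(signal.SIGTERM)         # -> (-1, "terminated by SIGTERM")
#   decode_wait_status(signal.SIGSEGV | 0x80)  # -> (-1, "terminated by SIGSEGV (core dumped)")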
_signames = None
def signame(sig):
"""Return a symbolic name for a signal.
Return "signal NNN" if there is no corresponding SIG name in the
signal module.
"""
if _signames is None:
_init_signames()
return _signames.get(sig) or "signal %d" % sig
def _init_signames():
global _signames
d = {}
for k, v in signal.__dict__.items():
k_startswith = getattr(k, "startswith", None)
if k_startswith is None: # pragma: nocover
continue
if k_startswith("SIG") and not k_startswith("SIG_"):
d[v] = k
_signames = d
def get_path():
"""Return a list corresponding to $PATH, or a default."""
path = ["/bin", "/usr/bin", "/usr/local/bin"]
if "PATH" in os.environ:
p = os.environ["PATH"]
if p:
path = p.split(os.pathsep)
return path
# Main program
def main(args=None):
assert os.name == "posix", "This code makes many Unix-specific assumptions"
d = Daemonizer()
d.main(args)
if __name__ == '__main__':
main()
|
frontend.py
|
#!/usr/bin/env python3
"""
@file tmtc_frontend.py
@date 01.11.2019
@brief This is part of the TMTC client developed by the SOURCE project by KSat
@description GUI is still work-in-progress
@manual
@author R. Mueller, P. Scheurenbrand, D. Nguyen
"""
import enum
import os
import sys
import time
import webbrowser
from multiprocessing import Process
from typing import Union
from PyQt5.QtWidgets import (
QMainWindow,
QGridLayout,
QTableWidget,
QWidget,
QLabel,
QCheckBox,
QDoubleSpinBox,
QFrame,
QComboBox,
QPushButton,
QTableWidgetItem,
QMenu,
QAction,
QMenuBar,
)
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5.QtCore import Qt, pyqtSignal, QObject, QThread, QRunnable
from tmtccmd.core.frontend_base import FrontendBase
from tmtccmd.core.backend import TmTcHandler
from tmtccmd.config.hook import TmTcHookBase
from tmtccmd.config.definitions import CoreGlobalIds, CoreModeList, CoreComInterfaces
from tmtccmd.config.hook import get_global_hook_obj
from tmtccmd.utility.logger import get_console_logger
from tmtccmd.core.globals_manager import get_global, update_global
from tmtccmd.com_if.tcpip_utilities import TcpIpConfigIds
import tmtccmd.config as config_module
LOGGER = get_console_logger()
CONNECT_BTTN_STYLE = (
"background-color: #1fc600;"
"border-style: inset;"
"font: bold;"
"padding: 6px;"
"border-width: 2px;"
"border-radius: 6px;"
)
DISCONNECT_BTTN_STYLE = (
"background-color: orange;"
"border-style: inset;"
"font: bold;"
"padding: 6px;"
"border-width: 2px;"
"border-radius: 6px;"
)
COMMAND_BUTTON_STYLE = (
"background-color: #cdeefd;"
"border-style: inset;"
"font: bold;"
"padding: 6px;"
"border-width: 2px;"
"border-radius: 6px;"
)
class WorkerOperationsCodes(enum.IntEnum):
DISCONNECT = 0
SEQUENTIAL_COMMANDING = 1
class WorkerThread(QObject):
finished = pyqtSignal()
def __init__(self, op_code: WorkerOperationsCodes, tmtc_handler: TmTcHandler):
super(QObject, self).__init__()
self.op_code = op_code
self.tmtc_handler = tmtc_handler
def run_worker(self):
if self.op_code == WorkerOperationsCodes.DISCONNECT:
self.tmtc_handler.close_listener()
while True:
if not self.tmtc_handler.is_com_if_active():
break
else:
time.sleep(0.4)
self.finished.emit()
elif self.op_code == WorkerOperationsCodes.SEQUENTIAL_COMMANDING:
self.tmtc_handler.set_mode(CoreModeList.SEQUENTIAL_CMD_MODE)
# It is expected that the TMTC handler is in the according state to perform the operation
self.tmtc_handler.perform_operation()
self.finished.emit()
else:
LOGGER.warning("Unknown worker operation code!")
self.finished.emit()
class RunnableThread(QRunnable):
"""
Runnable thread which can be used with QThreadPool. Not used for now, might be needed in the future.
"""
def run(self):
pass
class TmTcFrontend(QMainWindow, FrontendBase):
def __init__(
self, hook_obj: TmTcHookBase, tmtc_backend: TmTcHandler, app_name: str
):
super(TmTcFrontend, self).__init__()
super(QMainWindow, self).__init__()
self._tmtc_handler = tmtc_backend
self._app_name = app_name
self._hook_obj = hook_obj
self._tmtc_handler.initialize()
self.service_op_code_dict = dict()
self._service_list = []
self._op_code_list = []
self._com_if_list = []
self._last_com_if = CoreComInterfaces.UNSPECIFIED.value
self._current_com_if = CoreComInterfaces.UNSPECIFIED.value
self._current_service = ""
self._current_op_code = ""
self._current_com_if_key = "unspec"
self.__connected = False
self.__worker = None
self.__thread = None
self.__debug_mode = False
self.__combo_box_op_codes: Union[None, QComboBox] = None
module_path = os.path.abspath(config_module.__file__).replace("__init__.py", "")
self.logo_path = f"{module_path}/logo.png"
def prepare_start(self, args: any) -> Process:
return Process(target=self.start)
def start(self, qt_app: any):
self.__start_ui()
sys.exit(qt_app.exec())
def set_gui_logo(self, logo_total_path: str):
if os.path.isfile(logo_total_path):
self.logo_path = logo_total_path
else:
LOGGER.warning("Could not set logo, path invalid!")
def __start_ui(self):
self.__create_menu_bar()
win = QWidget(self)
self.setCentralWidget(win)
grid = QGridLayout()
win.setLayout(grid)
row = 0
self.setWindowTitle(self._app_name)
self.setWindowIcon(QIcon(self.logo_path))
add_pixmap = False
if add_pixmap:
row = self.__set_up_pixmap(grid=grid, row=row)
row = self.__set_up_config_section(grid=grid, row=row)
row = self.__add_vertical_separator(grid=grid, row=row)
# com if configuration
row = self.__set_up_com_if_section(grid=grid, row=row)
row = self.__add_vertical_separator(grid=grid, row=row)
row = self.__set_up_service_op_code_section(grid=grid, row=row)
self.__command_button = QPushButton()
self.__command_button.setText("Send Command")
self.__command_button.setStyleSheet(COMMAND_BUTTON_STYLE)
self.__command_button.clicked.connect(self.__start_seq_cmd_op)
self.__command_button.setEnabled(False)
grid.addWidget(self.__command_button, row, 0, 1, 2)
row += 1
self.show()
def __start_seq_cmd_op(self):
if self.__debug_mode:
LOGGER.info("Start Service Test Button pressed.")
if not self.__get_send_button():
return
self.__set_send_button(False)
self._tmtc_handler.set_service(self._current_service)
self._tmtc_handler.set_opcode(self._current_op_code)
self.__start_qthread_task(
op_code=WorkerOperationsCodes.SEQUENTIAL_COMMANDING,
finish_callback=self.__finish_seq_cmd_op,
)
def __finish_seq_cmd_op(self):
self.__set_send_button(True)
def __connect_button_action(self):
if not self.__connected:
LOGGER.info("Starting TM listener..")
# Build and assign new communication interface
if self._current_com_if != self._last_com_if:
hook_obj = get_global_hook_obj()
new_com_if = hook_obj.assign_communication_interface(
com_if_key=self._current_com_if,
tmtc_printer=self._tmtc_handler.get_printer(),
)
self._tmtc_handler.set_com_if(new_com_if)
self._tmtc_handler.start_listener(False)
self.__connect_button.setStyleSheet(DISCONNECT_BTTN_STYLE)
self.__command_button.setEnabled(True)
self.__connect_button.setText("Disconnect")
self.__connected = True
else:
LOGGER.info("Closing TM listener..")
self.__command_button.setEnabled(False)
self.__connect_button.setEnabled(False)
self.__start_qthread_task(
op_code=WorkerOperationsCodes.DISCONNECT,
finish_callback=self.__finish_disconnect_button_op,
)
def __finish_disconnect_button_op(self):
self.__connect_button.setEnabled(True)
# self.__disconnect_button.setEnabled(False)
self.__connect_button.setStyleSheet(CONNECT_BTTN_STYLE)
self.__connect_button.setText("Connect")
LOGGER.info("Disconnect successfull")
self.__connected = False
def __create_menu_bar(self):
menu_bar = self.menuBar()
# Creating menus using a QMenu object
file_menu = QMenu("&File", self)
menu_bar.addMenu(file_menu)
# Creating menus using a title
help_menu = menu_bar.addMenu("&Help")
help_action = QAction("Help", self)
help_action.triggered.connect(self.__help_url)
help_menu.addAction(help_action)
@staticmethod
def __help_url():
webbrowser.open("https://tmtccmd.readthedocs.io/en/latest/")
def __set_up_config_section(self, grid: QGridLayout, row: int) -> int:
grid.addWidget(QLabel("Configuration:"), row, 0, 1, 2)
row += 1
checkbox_console = QCheckBox("Print output to console")
checkbox_console.stateChanged.connect(self.__checkbox_console_update)
checkbox_log = QCheckBox("Print output to log file")
checkbox_log.stateChanged.connect(self.__checkbox_log_update)
checkbox_raw_tm = QCheckBox("Print all raw TM data directly")
checkbox_raw_tm.stateChanged.connect(self.__checkbox_print_raw_data_update)
checkbox_hk = QCheckBox("Print Housekeeping Data")
# checkbox_hk.setChecked(tmtcc_config.G_PRINT_HK_DATA)
checkbox_hk.stateChanged.connect(checkbox_print_hk_data)
checkbox_short = QCheckBox("Short Display Mode")
# checkbox_short.setChecked(tmtcc_config.G_DISPLAY_MODE == "short")
checkbox_short.stateChanged.connect(checkbox_short_display_mode)
grid.addWidget(checkbox_log, row, 0, 1, 1)
grid.addWidget(checkbox_console, row, 1, 1, 1)
row += 1
grid.addWidget(checkbox_raw_tm, row, 0, 1, 1)
grid.addWidget(checkbox_hk, row, 1, 1, 1)
row += 1
grid.addWidget(checkbox_short, row, 0, 1, 1)
row += 1
grid.addWidget(QLabel("TM Timeout:"), row, 0, 1, 1)
grid.addWidget(QLabel("TM Timeout Factor:"), row, 1, 1, 1)
row += 1
spin_timeout = QDoubleSpinBox()
spin_timeout.setValue(4)
# TODO: set sensible min/max values
spin_timeout.setSingleStep(0.1)
spin_timeout.setMinimum(0.25)
spin_timeout.setMaximum(60)
# https://youtrack.jetbrains.com/issue/PY-22908
# Ignore those warnings for now.
spin_timeout.valueChanged.connect(number_timeout)
grid.addWidget(spin_timeout, row, 0, 1, 1)
spin_timeout_factor = QDoubleSpinBox()
# spin_timeout_factor.setValue(tmtcc_config.G_TC_SEND_TIMEOUT_FACTOR)
# TODO: set sensible min/max values
spin_timeout_factor.setSingleStep(0.1)
spin_timeout_factor.setMinimum(0.25)
spin_timeout_factor.setMaximum(10)
spin_timeout_factor.valueChanged.connect(number_timeout_factor)
grid.addWidget(spin_timeout_factor, row, 1, 1, 1)
row += 1
return row
def __set_up_com_if_section(self, grid: QGridLayout, row: int) -> int:
grid.addWidget(QLabel("Communication Interface:"), row, 0, 1, 1)
com_if_combo_box = QComboBox()
all_com_ifs = get_global(CoreGlobalIds.COM_IF_DICT)
index = 0
# add all possible ComIFs to the comboBox
for com_if_key, com_if_value in all_com_ifs.items():
com_if_combo_box.addItem(com_if_value)
self._com_if_list.append((com_if_key, com_if_value))
if self._tmtc_handler.get_com_if_id() == com_if_key:
com_if_combo_box.setCurrentIndex(index)
index += 1
com_if_combo_box.currentIndexChanged.connect(self.__com_if_index_changed)
grid.addWidget(com_if_combo_box, row, 1, 1, 1)
row += 1
self.com_if_cfg_button = QPushButton()
self.com_if_cfg_button.setText("Configure")
grid.addWidget(self.com_if_cfg_button, row, 0, 1, 2)
row += 1
self.__connect_button = QPushButton()
self.__connect_button.setText("Connect")
self.__connect_button.setStyleSheet(CONNECT_BTTN_STYLE)
self.__connect_button.clicked.connect(self.__connect_button_action)
grid.addWidget(self.__connect_button, row, 0, 1, 2)
row += 1
return row
def __set_up_service_op_code_section(self, grid: QGridLayout, row: int):
grid.addWidget(QLabel("Service: "), row, 0, 1, 2)
grid.addWidget(QLabel("Operation Code: "), row, 1, 1, 2)
row += 1
combo_box_services = QComboBox()
default_service = get_global(CoreGlobalIds.CURRENT_SERVICE)
self.service_op_code_dict = self._hook_obj.get_service_op_code_dictionary()
if self.service_op_code_dict is None:
LOGGER.warning("Invalid service to operation code dictionary")
LOGGER.warning("Setting default dictionary")
from tmtccmd.config.globals import get_default_service_op_code_dict
self.service_op_code_dict = get_default_service_op_code_dict()
index = 0
default_index = 0
for service_key, service_value in self.service_op_code_dict.items():
combo_box_services.addItem(service_value[0])
if service_key == default_service:
default_index = index
self._service_list.append(service_key)
index += 1
combo_box_services.setCurrentIndex(default_index)
self._current_service = self._service_list[default_index]
combo_box_services.currentIndexChanged.connect(self.__service_index_changed)
grid.addWidget(combo_box_services, row, 0, 1, 1)
self.__combo_box_op_codes = QComboBox()
self._current_service = self._service_list[default_index]
self.__update_op_code_combo_box()
self.__combo_box_op_codes.currentIndexChanged.connect(
self.__op_code_index_changed
)
# TODO: Combo box also needs to be updated if another service is selected
grid.addWidget(self.__combo_box_op_codes, row, 1, 1, 1)
row += 1
return row
def __set_up_pixmap(self, grid: QGridLayout, row: int) -> int:
label = QLabel(self)
label.setGeometry(720, 10, 100, 100)
label.adjustSize()
pixmap = QPixmap(self.logo_path)
pixmap_width = pixmap.width()
pixmap_height = pixmap.height()
row += 1
pixmap_scaled = pixmap.scaled(
int(pixmap_width * 0.3), int(pixmap_height * 0.3), Qt.KeepAspectRatio
)
label.setPixmap(pixmap_scaled)
label.setScaledContents(True)
grid.addWidget(label, row, 0, 1, 2)
row += 1
return row
def __start_qthread_task(self, op_code: WorkerOperationsCodes, finish_callback):
self.__thread = QThread()
self.__worker = WorkerThread(op_code=op_code, tmtc_handler=self._tmtc_handler)
self.__worker.moveToThread(self.__thread)
self.__thread.started.connect(self.__worker.run_worker)
self.__worker.finished.connect(self.__thread.quit)
self.__worker.finished.connect(self.__worker.deleteLater)
self.__thread.finished.connect(self.__thread.deleteLater)
self.__thread.finished.connect(finish_callback)
self.__thread.start()
@staticmethod
def __add_vertical_separator(grid: QGridLayout, row: int):
separator = QFrame()
separator.setFrameShape(QFrame.HLine)
grid.addWidget(separator, row, 0, 1, 2)
row += 1
return row
def __service_index_changed(self, index: int):
self._current_service = self._service_list[index]
self.__update_op_code_combo_box()
if self.__debug_mode:
LOGGER.info("Service changed")
def __op_code_index_changed(self, index: int):
self._current_op_code = self._op_code_list[index]
if self.__debug_mode:
LOGGER.info("Op Code changed")
def __update_op_code_combo_box(self):
self.__combo_box_op_codes.clear()
self._op_code_list = []
op_code_dict = self.service_op_code_dict[self._current_service][1]
if op_code_dict is not None:
for op_code_key, op_code_value in op_code_dict.items():
try:
self._op_code_list.append(op_code_key)
self.__combo_box_op_codes.addItem(op_code_value[0])
except TypeError:
LOGGER.warning(f"Invalid op code entry {op_code_value}, skipping..")
if self._op_code_list:
    self._current_op_code = self._op_code_list[0]
def __checkbox_log_update(self, state: int):
update_global(CoreGlobalIds.PRINT_TO_FILE, state)
if self.__debug_mode:
LOGGER.info(["Enabled", "Disabled"][state == 0] + " print to log.")
def __checkbox_console_update(self, state: bool):
update_global(CoreGlobalIds.PRINT_TM, state)
if self.__debug_mode:
LOGGER.info(["enabled", "disabled"][state == 0] + " console print")
def __checkbox_print_raw_data_update(self, state: int):
update_global(CoreGlobalIds.PRINT_RAW_TM, state)
if self.__debug_mode:
LOGGER.info(["enabled", "disabled"][state == 0] + " printing of raw data")
def __set_send_button(self, state: bool):
self.__command_button.setEnabled(state)
def __get_send_button(self):
return self.__command_button.isEnabled()
def __com_if_index_changed(self, index: int):
self._current_com_if = self._com_if_list[index][0]
if self.__debug_mode:
LOGGER.info(f"Communication IF updated: {self._com_if_list[index][1]}")
class SingleCommandTable(QTableWidget):
def __init__(self):
super().__init__()
self.setRowCount(1)
self.setColumnCount(5)
self.setHorizontalHeaderItem(0, QTableWidgetItem("Service"))
self.setHorizontalHeaderItem(1, QTableWidgetItem("Subservice"))
self.setHorizontalHeaderItem(2, QTableWidgetItem("SSC"))
self.setHorizontalHeaderItem(3, QTableWidgetItem("Data"))
self.setHorizontalHeaderItem(4, QTableWidgetItem("CRC"))
self.setItem(0, 0, QTableWidgetItem("17"))
self.setItem(0, 1, QTableWidgetItem("1"))
self.setItem(0, 2, QTableWidgetItem("20"))
def checkbox_print_hk_data(state: int):
update_global(CoreGlobalIds.PRINT_HK, state)
LOGGER.info(["enabled", "disabled"][state == 0] + " printing of hk data")
def checkbox_short_display_mode(state: int):
update_global(CoreGlobalIds.DISPLAY_MODE, state)
LOGGER.info(["enabled", "disabled"][state == 0] + " short display mode")
def number_timeout(value: float):
update_global(CoreGlobalIds.TM_TIMEOUT, value)
LOGGER.info("PUS TM timeout changed to: " + str(value))
def number_timeout_factor(value: float):
update_global(CoreGlobalIds.TC_SEND_TIMEOUT_FACTOR, value)
LOGGER.info("PUS TM timeout factor changed to: " + str(value))
def ip_change_client(value):
ethernet_config = get_global(CoreGlobalIds.ETHERNET_CONFIG)
ethernet_config[TcpIpConfigIds.RECV_ADDRESS] = value
update_global(CoreGlobalIds.ETHERNET_CONFIG, ethernet_config)
LOGGER.info("Client IP changed: " + value)
def ip_change_board(value):
ethernet_config = get_global(CoreGlobalIds.ETHERNET_CONFIG)
ethernet_config[TcpIpConfigIds.SEND_ADDRESS] = value
update_global(CoreGlobalIds.ETHERNET_CONFIG, ethernet_config)
LOGGER.info("Board IP changed: " + value)
|
manager.py
|
#!/usr/bin/env python3.7
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
from selfdrive.dragonpilot.dragonconf import dragonpilot_set_params
from common.basedir import BASEDIR, PARAMS
from common.android import ANDROID
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1195
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
os._exit(os.wait()[1])
if __name__ == "__main__":
unblock_stdout()
from common.spinner import Spinner
else:
from common.spinner import FakeSpinner as Spinner
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner()
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline()
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (50.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
print(line.decode('utf8'))
except Exception:
pass
if scons.returncode != 0:
if retry:
print("scons build failed, cleaning in")
for i in range(3,-1,-1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
raise RuntimeError("scons build failed")
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.swaglog import cloudlog
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common import android
from common.apk import update_apks, pm_apply_packages, start_frame
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.controls.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": ("selfdrive/locationd", ["./paramsd"]),
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"dashcamd": "selfdrive.dragonpilot.dashcamd.dashcamd",
"shutdownd": "selfdrive.dragonpilot.shutdownd.shutdownd",
"appd": "selfdrive.dragonpilot.appd.appd",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord', 'paramsd']
# processes to end if thermal conditions exceed Green parameters
green_temp_processes = ['uploader']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
]
if ANDROID:
persistent_processes += [
'logcatd',
'tombstoned',
'updated',
'shutdownd',
'appd',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'dmonitoringd',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'ubloxd',
'locationd',
]
if ANDROID:
car_started_processes += [
'sensord',
'clocksd',
'gpsd',
'dmonitoringmodeld',
'deleter',
'dashcamd',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
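# Hypothetical usage sketch (the process name and module path below are made up,
# not part of openpilot): register an extra Python daemon that should only run
# while the car is started.
#
#   register_managed_process("exampled", "selfdrive.example.exampled", car_started=True)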
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc],
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id, dongle_secret = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start frame
if ANDROID:
pm_apply_packages('enable')
start_frame()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
logger_dead = False
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
# heavyweight batch processes are gated on favorable thermal conditions
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
for p in green_temp_processes:
if p in persistent_processes:
kill_managed_process(p)
else:
for p in green_temp_processes:
if p in persistent_processes:
start_managed_process(p)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in reversed(car_started_processes):
kill_managed_process(p)
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# The scons build above used the first half of the progress range, so the spinner
# continues from 50 here (from 0 if prebuilt)
total = 100.0 if prebuilt else 50.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
android.reboot(reason="recovery")
def main():
os.environ['PARAMS_PATH'] = PARAMS
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
# set unset params
if params.get("CommunityFeaturesToggle") is None:
params.put("CommunityFeaturesToggle", "0")
if params.get("CompletedTrainingVersion") is None:
params.put("CompletedTrainingVersion", "0")
if params.get("IsMetric") is None:
params.put("IsMetric", "0")
if params.get("RecordFront") is None:
params.put("RecordFront", "0")
if params.get("HasAcceptedTerms") is None:
params.put("HasAcceptedTerms", "0")
if params.get("HasCompletedSetup") is None:
params.put("HasCompletedSetup", "0")
if params.get("IsUploadRawEnabled") is None:
params.put("IsUploadRawEnabled", "1")
if params.get("IsLdwEnabled") is None:
params.put("IsLdwEnabled", "1")
if params.get("IsGeofenceEnabled") is None:
params.put("IsGeofenceEnabled", "-1")
if params.get("SpeedLimitOffset") is None:
params.put("SpeedLimitOffset", "0")
if params.get("LongitudinalControl") is None:
params.put("LongitudinalControl", "0")
if params.get("LimitSetSpeed") is None:
params.put("LimitSetSpeed", "0")
if params.get("LimitSetSpeedNeural") is None:
params.put("LimitSetSpeedNeural", "0")
if params.get("LastUpdateTime") is None:
t = datetime.datetime.now().isoformat()
params.put("LastUpdateTime", t.encode('utf8'))
if params.get("OpenpilotEnabledToggle") is None:
params.put("OpenpilotEnabledToggle", "1")
if params.get("LaneChangeEnabled") is None:
params.put("LaneChangeEnabled", "1")
dragonpilot_set_params(params)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
if params.get("DragonEnableLogger", encoding='utf8') == "0":
del managed_processes['loggerd']
del managed_processes['tombstoned']
if params.get("DragonEnableUploader", encoding='utf8') == "0":
del managed_processes['uploader']
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
main()
# manual exit because we are forked
sys.exit(0)
|
client.py
|
import logging
import os, copy
import threading
import tkinter, tkinter.font
from time import sleep
from pynput.keyboard import Key, Listener
from src.util.connection import Client, Communication
__buffer = {
'clicked': False,
'accept': True,
'move': "None",
'note': "None",
'page': '',
}
___client = Client()
__prev = {}
def __on_press(key):
global ___client, __buffer
logging.info('{} pressed...'.format(key))
logging.info('clicked ==> {} and accept ==> {}'.format(__buffer['clicked'], __buffer['accept']))
if key == Key.esc:
logging.info('close the application')
os._exit(0)
if not __buffer['clicked'] and __buffer['accept']:
if key == Key.right:
__buffer['move'] = 'next'
__buffer['clicked'] = True
elif key == Key.left:
__buffer['move'] = 'prev'
__buffer['clicked'] = True
# valid input then wait the server signal
if __buffer['clicked']:
__buffer['accept'] = False
__run(___client, __buffer)
def __on_release(key):
global __buffer
logging.info('{} released...'.format(key))
__buffer['clicked'] = False
def __run(_client, _buffer):
try:
_temp = {
'clicked': _buffer['clicked'],
'accept': _buffer['accept'],
'move': _buffer['move']
}
Communication.send_data(_client, _temp)
recv_data = Communication.receive_data(_client)
_buffer['note'] = recv_data['note']
_buffer['note'] = _buffer['note'].replace("\r", "\r\n")
_buffer['accept'] = True
except ConnectionError:
logging.error('server connection closed')
os._exit(-1)
def __window_destroy():
logging.info("destroy windows")
os._exit(0)
def __data_update(_window, _text):
global __buffer, __prev
hasNote = __buffer['note'] == "None"
isChange = __buffer != __prev
if hasNote or not isChange:
return -1
data = "[[ script ]] \n\n"+__buffer['note']
_text.delete('1.0', tkinter.END)
_text.insert(tkinter.END, data)
__prev = copy.deepcopy(__buffer)
return 0
def __my_window():
window = tkinter.Tk()
window.protocol("WM_DELETE_WINDOW", __window_destroy)
window.title("pptx-client.py")
window.geometry("1600x900")
scroll = tkinter.Scrollbar(window)
# if you want to change the font family then change the font family value
font = tkinter.font.Font(family="맑은 고딕", size=12)
text = tkinter.Text(window, height=50, width=75, font=font)
scroll.pack(side=tkinter.RIGHT, fill=tkinter.Y)
text.pack(side=tkinter.LEFT, fill=tkinter.BOTH, expand=True)
scroll.config(command=text.yview)
text.config(yscrollcommand=scroll.set)
__data_update(window, text)
while True:
sleep(1/24)
__data_update(window, text)
window.update()
def run(ip, port):
try:
___client.init(ip, port)
__run(___client, __buffer)
wThread = threading.Thread(target=__my_window)
wThread.start()
with Listener(on_press=__on_press,
on_release=__on_release) as listener:
listener.join()
except Exception:
logging.error("Unexpected ended occur")
os._exit(-1)
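# Minimal usage sketch (host and port are placeholders; the real values come from
# however this client is launched in the project):
#
#   if __name__ == '__main__':
#       logging.basicConfig(level=logging.INFO)
#       run('127.0.0.1', 9000)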
|
ipc.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Helper functions for inter-process communication via AddonSignals
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
from functools import wraps
import AddonSignals
from resources.lib.globals import g
import resources.lib.api.exceptions as apierrors
from .logging import debug, error
from .misc_utils import time_execution
try: # Python 2
unicode
except NameError: # Python 3
unicode = str # pylint: disable=redefined-builtin
class BackendNotReady(Exception):
"""The background services are not started yet"""
class Signals(object): # pylint: disable=no-init
"""Signal names for use with AddonSignals"""
# pylint: disable=too-few-public-methods
PLAYBACK_INITIATED = 'playback_initiated'
ESN_CHANGED = 'esn_changed'
LIBRARY_UPDATE_REQUESTED = 'library_update_requested'
UPNEXT_ADDON_INIT = 'upnext_data'
INVALIDATE_SERVICE_CACHE = 'invalidate_service_cache'
def register_slot(callback, signal=None, source_id=None):
"""Register a callback with AddonSignals for return calls"""
name = signal if signal else _signal_name(callback)
AddonSignals.registerSlot(
signaler_id=source_id or g.ADDON_ID,
signal=name,
callback=callback)
debug('Registered AddonSignals slot {} to {}'.format(name, callback))
def unregister_slot(callback, signal=None):
"""Remove a registered callback from AddonSignals"""
name = signal if signal else _signal_name(callback)
AddonSignals.unRegisterSlot(
signaler_id=g.ADDON_ID,
signal=name)
debug('Unregistered AddonSignals slot {}'.format(name))
def send_signal(signal, data=None, non_blocking=False):
"""Send a signal via AddonSignals"""
if non_blocking:
# AddonSignals' sendSignal looks like a non-blocking call, but it is implemented on top
# of executeJSONRPC, which blocks the invoker until the called function has completed,
# even when no data is returned.
# As a workaround, sendSignal is called here in a separate thread so the invoker is
# released immediately.
# Whether that is appropriate depends on the function being called, because the caller
# could otherwise stay blocked for an unnecessary amount of time.
# Note that several consecutive calls are still executed in sequence, not concurrently.
from threading import Thread
thread = Thread(target=_send_signal, args=[signal, data])
thread.start()
else:
_send_signal(signal, data)
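# Minimal usage sketch (the payload dict is a made-up example; the signal name is
# taken from the Signals class above):
#
#   send_signal(Signals.LIBRARY_UPDATE_REQUESTED,
#               data={'example_key': 'example_value'},
#               non_blocking=True)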
def _send_signal(signal, data):
AddonSignals.sendSignal(
source_id=g.ADDON_ID,
signal=signal,
data=data)
@time_execution(immediate=False)
def make_call(callname, data=None):
if g.IPC_OVER_HTTP:
return make_http_call(callname, data)
return make_addonsignals_call(callname, data)
def make_http_call(callname, data):
"""Make an IPC call via HTTP and wait for it to return.
The contents of data will be expanded to kwargs and passed into the target
function."""
from collections import OrderedDict
try:  # Python 3
    from urllib.request import build_opener, install_opener, ProxyHandler, urlopen
    from urllib.error import URLError
except ImportError:  # Python 2
    from urllib2 import build_opener, install_opener, ProxyHandler, URLError, urlopen
import json
debug('Handling HTTP IPC call to {}'.format(callname))
# don't use proxy for localhost
url = 'http://127.0.0.1:{}/{}'.format(
g.LOCAL_DB.get_value('ns_service_port', 8001), callname)
install_opener(build_opener(ProxyHandler({})))
try:
result = json.loads(
urlopen(url=url, data=json.dumps(data).encode('utf-8'), timeout=16).read(),
object_pairs_hook=OrderedDict)
except URLError:
raise BackendNotReady
_raise_for_error(callname, result)
return result
def make_addonsignals_call(callname, data):
"""Make an IPC call via AddonSignals and wait for it to return.
The contents of data will be expanded to kwargs and passed into the target
function."""
debug('Handling AddonSignals IPC call to {}'.format(callname))
result = AddonSignals.makeCall(
source_id=g.ADDON_ID,
signal=callname,
data=data,
timeout_ms=16000)
_raise_for_error(callname, result)
if result is None:
raise Exception('Addon Signals call timeout')
return result
def _raise_for_error(callname, result):
if isinstance(result, dict) and 'error' in result:
error('IPC call {callname} returned {error}: {message}'
.format(callname=callname, **result))
try:
raise apierrors.__dict__[result['error']](result['message'])
except KeyError:
raise Exception(result['error'])
def addonsignals_return_call(func):
"""Makes func return callable through AddonSignals and
handles catching, conversion and forwarding of exceptions"""
@wraps(func)
def make_return_call(instance, data):
"""Makes func return callable through AddonSignals and
handles catching, conversion and forwarding of exceptions"""
# pylint: disable=broad-except
try:
result = call(instance, func, data)
except Exception as exc:
error('IPC callback raised exception: {exc}', exc=exc)
import traceback
error(traceback.format_exc())
result = {
'error': exc.__class__.__name__,
'message': unicode(exc),
}
if g.IPC_OVER_HTTP:
return result
# Do not return None or AddonSignals will keep waiting till timeout
if result is None:
result = {}
AddonSignals.returnCall(
signal=_signal_name(func), source_id=g.ADDON_ID, data=result)
return result
return make_return_call
def call(instance, func, data):
if isinstance(data, dict):
return func(instance, **data)
if data is not None:
return func(instance, data)
return func(instance)
def _signal_name(func):
return func.__name__
|
cli.py
|
# encoding: utf-8
import collections
import csv
import multiprocessing as mp
import os
import datetime
import sys
from pprint import pprint
import re
import itertools
import json
import logging
import urlparse
from optparse import OptionConflictError
import traceback
import sqlalchemy as sa
import routes
import paste.script
from paste.registry import Registry
from paste.script.util.logging_config import fileConfig
import click
import ckan.logic as logic
import ckan.model as model
import ckan.include.rjsmin as rjsmin
import ckan.include.rcssmin as rcssmin
import ckan.plugins as p
from ckan.common import config
from ckan.tests.helpers import _get_test_app
# This is a test Flask request context to be used internally.
# Do not use it!
_cli_test_request_context = None
# NB No CKAN imports are allowed until after the config file is loaded.
# i.e. do the imports in methods, after _load_config is called.
# Otherwise loggers get disabled.
def deprecation_warning(message=None):
'''
Print a deprecation warning to STDERR.
If ``message`` is given it is also printed to STDERR.
'''
sys.stderr.write(u'WARNING: This function is deprecated.')
if message:
sys.stderr.write(u' ' + message.strip())
sys.stderr.write(u'\n')
def error(msg):
'''
Print an error message to STDERR and exit with return code 1.
'''
sys.stderr.write(msg)
if not msg.endswith('\n'):
sys.stderr.write('\n')
sys.exit(1)
def parse_db_config(config_key='sqlalchemy.url'):
''' Takes a config key for a database connection url and parses it into
a dictionary. Expects a url like:
'postgres://tester:pass@localhost/ckantest3'
'''
from ckan.common import config
url = config[config_key]
regex = [
'^\s*(?P<db_type>\w*)',
'://',
'(?P<db_user>[^:]*)',
':?',
'(?P<db_pass>[^@]*)',
'@',
'(?P<db_host>[^/:]*)',
':?',
'(?P<db_port>[^/]*)',
'/',
'(?P<db_name>[\w.-]*)'
]
db_details_match = re.match(''.join(regex), url)
if not db_details_match:
raise Exception('Could not extract db details from url: %r' % url)
db_details = db_details_match.groupdict()
return db_details
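# Sketch of the expected result for the docstring's example URL
# ('postgres://tester:pass@localhost/ckantest3'):
#
#   {'db_type': 'postgres', 'db_user': 'tester', 'db_pass': 'pass',
#    'db_host': 'localhost', 'db_port': '', 'db_name': 'ckantest3'}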
def user_add(args):
'''Add new user if we use paster sysadmin add
or paster user add
'''
if len(args) < 1:
error('Error: you need to specify the user name.')
username = args[0]
# parse args into data_dict
data_dict = {'name': username}
for arg in args[1:]:
try:
field, value = arg.split('=', 1)
data_dict[field] = value
except ValueError:
raise ValueError(
'Could not parse arg: %r (expected "<option>=<value>")' % arg
)
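    # For example (hypothetical invocation, not taken from the CKAN docs):
    #   paster user add alice email=alice@example.com password=secret
    # yields data_dict == {'name': 'alice', 'email': 'alice@example.com',
    #                      'password': 'secret'} before the prompts below run.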
# Required
while not data_dict.get('email'):
data_dict['email'] = raw_input('Email address: ')
if 'password' not in data_dict:
data_dict['password'] = UserCmd.password_prompt()
# Optional
if 'fullname' in data_dict:
data_dict['fullname'] = data_dict['fullname'].decode(
sys.getfilesystemencoding()
)
print('Creating user: %r' % username)
try:
import ckan.logic as logic
import ckan.model as model
site_user = logic.get_action('get_site_user')({
'model': model,
'ignore_auth': True},
{}
)
context = {
'model': model,
'session': model.Session,
'ignore_auth': True,
'user': site_user['name'],
}
user_dict = logic.get_action('user_create')(context, data_dict)
pprint(user_dict)
except logic.ValidationError, e:
error(traceback.format_exc())
## from http://code.activestate.com/recipes/577058/ MIT licence.
## Written by Trent Mick
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes": "yes", "y": "yes", "ye": "yes",
"no": "no", "n": "no"}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "\
"(or 'y' or 'n').\n")
class MockTranslator(object):
def gettext(self, value):
return value
def ugettext(self, value):
return value
def ungettext(self, singular, plural, n):
if n > 1:
return plural
return singular
def _get_config(config=None):
from paste.deploy import appconfig
if config:
filename = os.path.abspath(config)
config_source = '-c parameter'
elif os.environ.get('CKAN_INI'):
filename = os.environ.get('CKAN_INI')
config_source = '$CKAN_INI'
else:
default_filename = 'development.ini'
filename = os.path.join(os.getcwd(), default_filename)
if not os.path.exists(filename):
# give really clear error message for this common situation
msg = 'ERROR: You need to specify the CKAN config (.ini) '\
'file path.'\
'\nUse the --config parameter or set environment ' \
'variable CKAN_INI or have {}\nin the current directory.' \
.format(default_filename)
exit(msg)
if not os.path.exists(filename):
msg = 'Config file not found: %s' % filename
msg += '\n(Given by: %s)' % config_source
exit(msg)
fileConfig(filename)
return appconfig('config:' + filename)
def load_config(config, load_site_user=True):
conf = _get_config(config)
assert 'ckan' not in dir() # otherwise loggers would be disabled
# We have now loaded the config. Now we can import ckan for the
# first time.
from ckan.config.environment import load_environment
load_environment(conf.global_conf, conf.local_conf)
# Set this internal test request context with the configured environment so
# it can be used when calling url_for from the CLI.
global _cli_test_request_context
flask_app = _get_test_app().flask_app
_cli_test_request_context = flask_app.test_request_context()
registry = Registry()
registry.prepare()
import pylons
registry.register(pylons.translator, MockTranslator())
site_user = None
if model.user_table.exists() and load_site_user:
# If the DB has already been initialized, create and register
# a pylons context object, and add the site user to it, so the
# auth works as in a normal web request
c = pylons.util.AttribSafeContextObj()
registry.register(pylons.c, c)
site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
pylons.c.user = site_user['name']
pylons.c.userobj = model.User.get(site_user['name'])
## give routes enough information to run url_for
parsed = urlparse.urlparse(conf.get('ckan.site_url', 'http://0.0.0.0'))
request_config = routes.request_config()
request_config.host = parsed.netloc + parsed.path
request_config.protocol = parsed.scheme
return site_user
def paster_click_group(summary):
'''Return a paster command click.Group for paster subcommands
:param command: the paster command linked to this function from
setup.py, used in help text (e.g. "datastore")
:param summary: summary text used in paster's help/command listings
(e.g. "Perform commands to set up the datastore")
'''
class PasterClickGroup(click.Group):
'''A click.Group that may be called like a paster command'''
def __call__(self, ignored_command):
sys.argv.remove(ignored_command)
return super(PasterClickGroup, self).__call__(
prog_name=u'paster ' + ignored_command,
help_option_names=[u'-h', u'--help'],
obj={})
@click.group(cls=PasterClickGroup)
@click.option(
'--plugin',
metavar='ckan',
help='paster plugin (when run outside ckan directory)')
@click_config_option
@click.pass_context
def cli(ctx, plugin, config):
ctx.obj['config'] = config
cli.summary = summary
cli.group_name = u'ckan'
return cli
# common definition for paster ... --config
click_config_option = click.option(
'-c',
'--config',
default=None,
metavar='CONFIG',
help=u'Config file to use (default: development.ini)')
class CkanCommand(paste.script.command.Command):
    '''Base class for CKAN paster command classes to inherit from.'''
parser = paste.script.command.Command.standard_parser(verbose=True)
parser.add_option('-c', '--config', dest='config',
help='Config file to use.')
parser.add_option('-f', '--file',
action='store',
dest='file_path',
help="File to dump results to (if needed)")
default_verbosity = 1
group_name = 'ckan'
def _load_config(self, load_site_user=True):
self.site_user = load_config(self.options.config, load_site_user)
class ManageDb(CkanCommand):
'''Perform various tasks on the database.
db create - alias of db upgrade
db init - create and put in default data
db clean - clears db (including dropping tables) and
search index
    db upgrade [version no.] - upgrade the db schema to the given version (latest if not specified)
db version - returns current version of data schema
db dump FILE_PATH - dump to a pg_dump file [DEPRECATED]
db load FILE_PATH - load a pg_dump from a file [DEPRECATED]
db load-only FILE_PATH - load a pg_dump from a file but don\'t do
the schema upgrade or search indexing [DEPRECATED]
db create-from-model - create database from the model (indexes not made)
    db migrate-filestore - migrate all uploaded data from the 2.1 filestore.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 1
def command(self):
cmd = self.args[0]
        self._load_config(cmd != 'upgrade')
import ckan.model as model
import ckan.lib.search as search
if cmd == 'init':
model.repo.init_db()
if self.verbose:
print 'Initialising DB: SUCCESS'
elif cmd == 'clean' or cmd == 'drop':
# remove any *.pyc version files to prevent conflicts
v_path = os.path.join(os.path.dirname(__file__),
'..', 'migration', 'versions', '*.pyc')
import glob
filelist = glob.glob(v_path)
for f in filelist:
os.remove(f)
model.repo.clean_db()
search.clear_all()
if self.verbose:
print 'Cleaning DB: SUCCESS'
elif cmd == 'upgrade':
if len(self.args) > 1:
model.repo.upgrade_db(self.args[1])
else:
model.repo.upgrade_db()
elif cmd == 'version':
self.version()
elif cmd == 'dump':
self.dump()
elif cmd == 'load':
self.load()
elif cmd == 'load-only':
self.load(only_load=True)
elif cmd == 'create-from-model':
model.repo.create_db()
if self.verbose:
print 'Creating DB: SUCCESS'
elif cmd == 'migrate-filestore':
self.migrate_filestore()
else:
error('Command %s not recognized' % cmd)
def _get_db_config(self):
return parse_db_config()
def _get_postgres_cmd(self, command):
self.db_details = self._get_db_config()
if self.db_details.get('db_type') not in ('postgres', 'postgresql'):
raise AssertionError('Expected postgres database - not %r' % self.db_details.get('db_type'))
pg_cmd = command
pg_cmd += ' -U %(db_user)s' % self.db_details
if self.db_details.get('db_pass') not in (None, ''):
pg_cmd = 'export PGPASSWORD=%(db_pass)s && ' % self.db_details + pg_cmd
if self.db_details.get('db_host') not in (None, ''):
pg_cmd += ' -h %(db_host)s' % self.db_details
if self.db_details.get('db_port') not in (None, ''):
pg_cmd += ' -p %(db_port)s' % self.db_details
return pg_cmd
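    # Illustrative example of a command built above (the values are made up):
    #     export PGPASSWORD=secret && pg_dump -U ckan_default -h localhost -p 5432
    # _get_psql_cmd() and _postgres_dump() then append the database name and the
    # input/output file as needed.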
def _get_psql_cmd(self):
psql_cmd = self._get_postgres_cmd('psql')
psql_cmd += ' -d %(db_name)s' % self.db_details
return psql_cmd
def _postgres_dump(self, filepath):
pg_dump_cmd = self._get_postgres_cmd('pg_dump')
pg_dump_cmd += ' %(db_name)s' % self.db_details
pg_dump_cmd += ' > %s' % filepath
self._run_cmd(pg_dump_cmd)
print 'Dumped database to: %s' % filepath
def _postgres_load(self, filepath):
import ckan.model as model
assert not model.repo.are_tables_created(), "Tables already found. You need to 'db clean' before a load."
pg_cmd = self._get_psql_cmd() + ' -f %s' % filepath
self._run_cmd(pg_cmd)
print 'Loaded CKAN database: %s' % filepath
def _run_cmd(self, command_line):
import subprocess
retcode = subprocess.call(command_line, shell=True)
if retcode != 0:
raise SystemError('Command exited with errorcode: %i' % retcode)
def dump(self):
deprecation_warning(u"Use PostgreSQL's pg_dump instead.")
if len(self.args) < 2:
print 'Need pg_dump filepath'
return
dump_path = self.args[1]
        self._postgres_dump(dump_path)
def load(self, only_load=False):
deprecation_warning(u"Use PostgreSQL's pg_restore instead.")
if len(self.args) < 2:
print 'Need pg_dump filepath'
return
dump_path = self.args[1]
        self._postgres_load(dump_path)
if not only_load:
print 'Upgrading DB'
import ckan.model as model
model.repo.upgrade_db()
print 'Rebuilding search index'
import ckan.lib.search
ckan.lib.search.rebuild()
else:
print 'Now remember you have to call \'db upgrade\' and then \'search-index rebuild\'.'
print 'Done'
def migrate_filestore(self):
from ckan.model import Session
import requests
from ckan.lib.uploader import ResourceUpload
results = Session.execute("select id, revision_id, url from resource "
"where resource_type = 'file.upload' "
"and (url_type <> 'upload' or url_type is null)"
"and url like '%storage%'")
for id, revision_id, url in results:
response = requests.get(url, stream=True)
if response.status_code != 200:
print "failed to fetch %s (code %s)" % (url,
response.status_code)
continue
resource_upload = ResourceUpload({'id': id})
assert resource_upload.storage_path, "no storage configured aborting"
directory = resource_upload.get_directory(id)
filepath = resource_upload.get_path(id)
try:
os.makedirs(directory)
except OSError, e:
## errno 17 is file already exists
if e.errno != 17:
raise
with open(filepath, 'wb+') as out:
for chunk in response.iter_content(1024):
if chunk:
out.write(chunk)
Session.execute("update resource set url_type = 'upload'"
"where id = :id", {'id': id})
Session.execute("update resource_revision set url_type = 'upload'"
"where id = :id and "
"revision_id = :revision_id",
{'id': id, 'revision_id': revision_id})
Session.commit()
print "Saved url %s" % url
def version(self):
from ckan.model import Session
print Session.execute('select version from migrate_version;').fetchall()
class SearchIndexCommand(CkanCommand):
'''Creates a search index for all datasets
Usage:
search-index [-i] [-o] [-r] [-e] [-q] rebuild [dataset_name] - reindex dataset_name if given, if not then rebuild
full search index (all datasets)
search-index rebuild_fast - reindex using multiprocessing using all cores.
                                   This acts in the same way as rebuild -r [EXPERIMENTAL]
search-index check - checks for datasets not indexed
search-index show DATASET_NAME - shows index of a dataset
search-index clear [dataset_name] - clears the search index for the provided dataset or
for the whole ckan instance
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 0
def __init__(self, name):
super(SearchIndexCommand, self).__init__(name)
self.parser.add_option('-i', '--force', dest='force',
action='store_true', default=False,
help='Ignore exceptions when rebuilding the index')
self.parser.add_option('-o', '--only-missing', dest='only_missing',
action='store_true', default=False,
help='Index non indexed datasets only')
self.parser.add_option('-r', '--refresh', dest='refresh',
action='store_true', default=False,
help='Refresh current index (does not clear the existing one)')
self.parser.add_option('-q', '--quiet', dest='quiet',
action='store_true', default=False,
help='Do not output index rebuild progress')
self.parser.add_option('-e', '--commit-each', dest='commit_each',
action='store_true', default=False, help=
'''Perform a commit after indexing each dataset. This ensures that changes are
immediately available in the search, but slows the process down significantly.
Default is false.''')
def command(self):
if not self.args:
# default to printing help
print self.usage
return
cmd = self.args[0]
# Do not run load_config yet
if cmd == 'rebuild_fast':
self.rebuild_fast()
return
self._load_config()
if cmd == 'rebuild':
self.rebuild()
elif cmd == 'check':
self.check()
elif cmd == 'show':
self.show()
elif cmd == 'clear':
self.clear()
else:
print 'Command %s not recognized' % cmd
def rebuild(self):
from ckan.lib.search import rebuild, commit
        # By default we don't commit after each request to Solr, as it is
# a really heavy operation and slows things a lot
if len(self.args) > 1:
rebuild(self.args[1])
else:
rebuild(only_missing=self.options.only_missing,
force=self.options.force,
refresh=self.options.refresh,
defer_commit=(not self.options.commit_each),
quiet=self.options.quiet)
if not self.options.commit_each:
commit()
def check(self):
from ckan.lib.search import check
check()
def show(self):
from ckan.lib.search import show
if not len(self.args) == 2:
print 'Missing parameter: dataset-name'
return
index = show(self.args[1])
pprint(index)
def clear(self):
from ckan.lib.search import clear, clear_all
package_id = self.args[1] if len(self.args) > 1 else None
if not package_id:
clear_all()
else:
clear(package_id)
def rebuild_fast(self):
        ### Get our config but without starting the pylons environment ###
conf = self._get_config()
### Get ids using own engine, otherwise multiprocess will balk
db_url = conf['sqlalchemy.url']
engine = sa.create_engine(db_url)
package_ids = []
result = engine.execute("select id from package where state = 'active';")
for row in result:
package_ids.append(row[0])
def start(ids):
            ## load actual environment for each subprocess, so each has its
            ## own sa session
self._load_config()
from ckan.lib.search import rebuild, commit
rebuild(package_ids=ids)
commit()
def chunks(l, n):
""" Yield n successive chunks from l.
"""
newn = int(len(l) / n)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
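        # Illustrative example: chunks(range(10), 3) yields [0, 1, 2],
        # [3, 4, 5] and [6, 7, 8, 9]; the last chunk absorbs the remainder,
        # so each worker process gets a roughly equal share of package ids.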
processes = []
for chunk in chunks(package_ids, mp.cpu_count()):
process = mp.Process(target=start, args=(chunk,))
processes.append(process)
process.daemon = True
process.start()
for process in processes:
process.join()
class Notification(CkanCommand):
'''Send out modification notifications.
In "replay" mode, an update signal is sent for each dataset in the database.
Usage:
notify replay - send out modification signals
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
from ckan.model import Session, Package, DomainObjectOperation
from ckan.model.modification import DomainObjectModificationExtension
if not self.args:
# default to run
cmd = 'replay'
else:
cmd = self.args[0]
if cmd == 'replay':
dome = DomainObjectModificationExtension()
for package in Session.query(Package):
dome.notify(package, DomainObjectOperation.changed)
else:
print 'Command %s not recognized' % cmd
class RDFExport(CkanCommand):
'''Export active datasets as RDF
This command dumps out all currently active datasets as RDF into the
specified folder.
Usage:
paster rdf-export /path/to/store/output
'''
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
self._load_config()
if not self.args:
# default to run
print RDFExport.__doc__
else:
self.export_datasets(self.args[0])
def export_datasets(self, out_folder):
'''
Export datasets as RDF to an output folder.
'''
import urlparse
import urllib2
from ckan.common import config
import ckan.model as model
import ckan.logic as logic
import ckan.lib.helpers as h
# Create output folder if not exists
if not os.path.isdir(out_folder):
os.makedirs(out_folder)
fetch_url = config['ckan.site_url']
user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {'model': model, 'session': model.Session, 'user': user['name']}
dataset_names = logic.get_action('package_list')(context, {})
for dataset_name in dataset_names:
dd = logic.get_action('package_show')(context, {'id': dataset_name})
if not dd['state'] == 'active':
continue
url = h.url_for(controller='package', action='read', id=dd['name'])
url = urlparse.urljoin(fetch_url, url[1:]) + '.rdf'
try:
fname = os.path.join(out_folder, dd['name']) + ".rdf"
try:
r = urllib2.urlopen(url).read()
except urllib2.HTTPError, e:
if e.code == 404:
error('Please install ckanext-dcat and enable the ' +
'`dcat` plugin to use the RDF serializations')
with open(fname, 'wb') as f:
f.write(r)
except IOError, ioe:
sys.stderr.write(str(ioe) + "\n")
class Sysadmin(CkanCommand):
'''Gives sysadmin rights to a named user
Usage:
sysadmin - lists sysadmins
sysadmin list - lists sysadmins
sysadmin add USERNAME - make an existing user into a sysadmin
sysadmin add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- creates a new user that is a sysadmin
(prompts for password and email if not
supplied).
Field can be: apikey
password
email
sysadmin remove USERNAME - removes user from sysadmins
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 0
def command(self):
self._load_config()
cmd = self.args[0] if self.args else None
if cmd is None or cmd == 'list':
self.list()
elif cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
else:
print 'Command %s not recognized' % cmd
def list(self):
import ckan.model as model
print 'Sysadmins:'
sysadmins = model.Session.query(model.User).filter_by(sysadmin=True,
state='active')
print 'count = %i' % sysadmins.count()
for sysadmin in sysadmins:
print '%s name=%s email=%s id=%s' % (
sysadmin.__class__.__name__,
sysadmin.name,
sysadmin.email,
sysadmin.id)
def add(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user to be made sysadmin.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'User "%s" not found' % username
makeuser = raw_input('Create new user: %s? [y/n]' % username)
if makeuser == 'y':
user_add(self.args[1:])
user = model.User.by_name(unicode(username))
else:
print 'Exiting ...'
return
user.sysadmin = True
model.Session.add(user)
model.repo.commit_and_remove()
print 'Added %s as sysadmin' % username
def remove(self):
import ckan.model as model
if len(self.args) < 2:
            print 'Need name of the user to remove from sysadmins.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'Error: user "%s" not found!' % username
return
user.sysadmin = False
model.repo.commit_and_remove()
class UserCmd(CkanCommand):
'''Manage users
Usage:
user - lists users
user list - lists users
user USERNAME - shows user properties
user add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- add a user (prompts for email and
password if not supplied).
Field can be: apikey
password
email
user setpass USERNAME - set user password (prompts)
user remove USERNAME - removes user from users
user search QUERY - searches for a user name
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 0
def command(self):
self._load_config()
if not self.args:
self.list()
else:
cmd = self.args[0]
if cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
elif cmd == 'search':
self.search()
elif cmd == 'setpass':
self.setpass()
elif cmd == 'list':
self.list()
else:
self.show()
def get_user_str(self, user):
user_str = 'name=%s' % user.name
if user.name != user.display_name:
user_str += ' display=%s' % user.display_name
return user_str
def list(self):
import ckan.model as model
print 'Users:'
users = model.Session.query(model.User).filter_by(state='active')
print 'count = %i' % users.count()
for user in users:
print self.get_user_str(user)
def show(self):
import ckan.model as model
username = self.args[0]
user = model.User.get(unicode(username))
print 'User: \n', user
def setpass(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
return
username = self.args[1]
user = model.User.get(username)
print('Editing user: %r' % user.name)
password = self.password_prompt()
user.password = password
model.repo.commit_and_remove()
print 'Done'
def search(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need user name query string.'
return
query_str = self.args[1]
query = model.User.search(query_str)
print '%i users matching %r:' % (query.count(), query_str)
for user in query.all():
print self.get_user_str(user)
@classmethod
def password_prompt(cls):
import getpass
password1 = None
while not password1:
password1 = getpass.getpass('Password: ')
password2 = getpass.getpass('Confirm password: ')
if password1 != password2:
error('Passwords do not match')
return password1
def add(self):
user_add(self.args[1:])
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
return
username = self.args[1]
p.toolkit.get_action('user_delete')(
{'model': model, 'ignore_auth': True},
{'id': username})
print('Deleted user: %s' % username)
class DatasetCmd(CkanCommand):
'''Manage datasets
Usage:
dataset DATASET_NAME|ID - shows dataset properties
dataset show DATASET_NAME|ID - shows dataset properties
dataset list - lists datasets
dataset delete [DATASET_NAME|ID] - changes dataset state to 'deleted'
dataset purge [DATASET_NAME|ID] - removes dataset from db entirely
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 0
def command(self):
self._load_config()
if not self.args:
print self.usage
else:
cmd = self.args[0]
if cmd == 'delete':
self.delete(self.args[1])
elif cmd == 'purge':
self.purge(self.args[1])
elif cmd == 'list':
self.list()
elif cmd == 'show':
self.show(self.args[1])
else:
self.show(self.args[0])
def list(self):
import ckan.model as model
print 'Datasets:'
datasets = model.Session.query(model.Package)
print 'count = %i' % datasets.count()
for dataset in datasets:
state = ('(%s)' % dataset.state) if dataset.state != 'active' else ''
print '%s %s %s' % (dataset.id, dataset.name, state)
def _get_dataset(self, dataset_ref):
import ckan.model as model
dataset = model.Package.get(unicode(dataset_ref))
assert dataset, 'Could not find dataset matching reference: %r' % dataset_ref
return dataset
def show(self, dataset_ref):
import pprint
dataset = self._get_dataset(dataset_ref)
pprint.pprint(dataset.as_dict())
def delete(self, dataset_ref):
import ckan.model as model
dataset = self._get_dataset(dataset_ref)
old_state = dataset.state
rev = model.repo.new_revision()
dataset.delete()
model.repo.commit_and_remove()
dataset = self._get_dataset(dataset_ref)
print '%s %s -> %s' % (dataset.name, old_state, dataset.state)
def purge(self, dataset_ref):
import ckan.logic as logic
dataset = self._get_dataset(dataset_ref)
name = dataset.name
site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
context = {'user': site_user['name']}
logic.get_action('dataset_purge')(
context, {'id': dataset_ref})
print '%s purged' % name
class Celery(CkanCommand):
'''Celery daemon [DEPRECATED]
This command is DEPRECATED, use `paster jobs` instead.
Usage:
celeryd <run> - run the celery daemon
celeryd run concurrency - run the celery daemon with
argument 'concurrency'
celeryd view - view all tasks in the queue
celeryd clean - delete all tasks in the queue
'''
min_args = 0
max_args = 2
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
if not self.args:
self.run_()
else:
cmd = self.args[0]
if cmd == 'run':
self.run_()
elif cmd == 'view':
self.view()
elif cmd == 'clean':
self.clean()
else:
error('Command %s not recognized' % cmd)
def run_(self):
deprecation_warning(u'Use `paster jobs worker` instead.')
default_ini = os.path.join(os.getcwd(), 'development.ini')
if self.options.config:
os.environ['CKAN_CONFIG'] = os.path.abspath(self.options.config)
elif os.path.isfile(default_ini):
os.environ['CKAN_CONFIG'] = default_ini
else:
error('No .ini specified and none was found in current directory')
from ckan.lib.celery_app import celery
celery_args = []
if len(self.args) == 2 and self.args[1] == 'concurrency':
celery_args.append('--concurrency=1')
celery.worker_main(argv=['celeryd', '--loglevel=INFO'] + celery_args)
def view(self):
deprecation_warning(u'Use `paster jobs list` instead.')
self._load_config()
import ckan.model as model
from kombu.transport.sqlalchemy.models import Message
q = model.Session.query(Message)
q_visible = q.filter_by(visible=True)
print '%i messages (total)' % q.count()
print '%i visible messages' % q_visible.count()
for message in q:
if message.visible:
print '%i: Visible' % (message.id)
else:
print '%i: Invisible Sent:%s' % (message.id, message.sent_at)
def clean(self):
deprecation_warning(u'Use `paster jobs clear` instead.')
self._load_config()
import ckan.model as model
query = model.Session.execute("select * from kombu_message")
tasks_initially = query.rowcount
if not tasks_initially:
print 'No tasks to delete'
sys.exit(0)
query = model.Session.execute("delete from kombu_message")
query = model.Session.execute("select * from kombu_message")
tasks_afterwards = query.rowcount
print '%i of %i tasks deleted' % (tasks_initially - tasks_afterwards,
tasks_initially)
if tasks_afterwards:
error('Failed to delete all tasks')
model.repo.commit_and_remove()
class Ratings(CkanCommand):
'''Manage the ratings stored in the db
Usage:
ratings count - counts ratings
ratings clean - remove all ratings
ratings clean-anonymous - remove only anonymous ratings
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
cmd = self.args[0]
if cmd == 'count':
self.count()
elif cmd == 'clean':
self.clean()
elif cmd == 'clean-anonymous':
self.clean(user_ratings=False)
else:
print 'Command %s not recognized' % cmd
def count(self):
import ckan.model as model
q = model.Session.query(model.Rating)
print "%i ratings" % q.count()
        q = q.filter(model.Rating.user_id.is_(None))
print "of which %i are anonymous ratings" % q.count()
def clean(self, user_ratings=True):
import ckan.model as model
q = model.Session.query(model.Rating)
print "%i ratings" % q.count()
if not user_ratings:
            q = q.filter(model.Rating.user_id.is_(None))
print "of which %i are anonymous ratings" % q.count()
ratings = q.all()
for rating in ratings:
rating.purge()
model.repo.commit_and_remove()
## Used by the Tracking class
_ViewCount = collections.namedtuple("ViewCount", "id name count")
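# e.g. _ViewCount(id=u'abc-123', name=u'some-dataset', count=42) is one row per
# dataset in the tracking export (illustrative values only).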
class Tracking(CkanCommand):
'''Update tracking statistics
Usage:
tracking update [start_date] - update tracking stats
tracking export FILE [start_date] - export tracking stats to a csv file
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
engine = model.meta.engine
cmd = self.args[0]
if cmd == 'update':
start_date = self.args[1] if len(self.args) > 1 else None
self.update_all(engine, start_date)
elif cmd == 'export':
if len(self.args) <= 1:
error(self.__class__.__doc__)
output_file = self.args[1]
start_date = self.args[2] if len(self.args) > 2 else None
self.update_all(engine, start_date)
self.export_tracking(engine, output_file)
else:
error(self.__class__.__doc__)
def update_all(self, engine, start_date=None):
if start_date:
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
else:
            # No date given. Find the latest date we already have data for and
            # start 2 days before that, in case new data has since arrived.
            # If there is no data at all, start from 2011-01-01.
sql = '''SELECT tracking_date from tracking_summary
ORDER BY tracking_date DESC LIMIT 1;'''
result = engine.execute(sql).fetchall()
if result:
start_date = result[0]['tracking_date']
start_date += datetime.timedelta(-2)
# convert date to datetime
combine = datetime.datetime.combine
start_date = combine(start_date, datetime.time(0))
else:
start_date = datetime.datetime(2011, 1, 1)
start_date_solrsync = start_date
end_date = datetime.datetime.now()
while start_date < end_date:
stop_date = start_date + datetime.timedelta(1)
self.update_tracking(engine, start_date)
print 'tracking updated for %s' % start_date
start_date = stop_date
self.update_tracking_solr(engine, start_date_solrsync)
def _total_views(self, engine):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql).fetchall()]
def _recent_views(self, engine, measure_from):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
WHERE s.tracking_date >= %(measure_from)s
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql, measure_from=str(measure_from)).fetchall()]
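    # Both helpers above return _ViewCount tuples ordered by total views;
    # export_tracking() below combines them into the CSV rows it writes out.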
def export_tracking(self, engine, output_filename):
'''Write tracking summary to a csv file.'''
HEADINGS = [
"dataset id",
"dataset name",
"total views",
"recent views (last 2 weeks)",
]
measure_from = datetime.date.today() - datetime.timedelta(days=14)
recent_views = self._recent_views(engine, measure_from)
total_views = self._total_views(engine)
with open(output_filename, 'w') as fh:
f_out = csv.writer(fh)
f_out.writerow(HEADINGS)
recent_views_for_id = dict((r.id, r.count) for r in recent_views)
f_out.writerows([(r.id,
r.name,
r.count,
recent_views_for_id.get(r.id, 0))
for r in total_views])
def update_tracking(self, engine, summary_date):
PACKAGE_URL = '/dataset/'
# clear out existing data before adding new
sql = '''DELETE FROM tracking_summary
WHERE tracking_date='%s'; ''' % summary_date
engine.execute(sql)
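        # Aggregate the raw tracking hits into one summary row per
        # (url, date, tracking_type), counting distinct user keys via a
        # temporary table that is dropped again afterwards.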
sql = '''SELECT DISTINCT url, user_key,
CAST(access_timestamp AS Date) AS tracking_date,
tracking_type INTO tracking_tmp
FROM tracking_raw
WHERE CAST(access_timestamp as Date)=%s;
INSERT INTO tracking_summary
(url, count, tracking_date, tracking_type)
SELECT url, count(user_key), tracking_date, tracking_type
FROM tracking_tmp
GROUP BY url, tracking_date, tracking_type;
DROP TABLE tracking_tmp;
COMMIT;'''
engine.execute(sql, summary_date)
# get ids for dataset urls
sql = '''UPDATE tracking_summary t
SET package_id = COALESCE(
(SELECT id FROM package p
WHERE p.name = regexp_replace(' ' || t.url, '^[ ]{1}(/\w{2}){0,1}' || %s, ''))
,'~~not~found~~')
WHERE t.package_id IS NULL
AND tracking_type = 'page';'''
engine.execute(sql, PACKAGE_URL)
# update summary totals for resources
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'resource';'''
engine.execute(sql)
# update summary totals for pages
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'page'
AND t1.package_id IS NOT NULL
AND t1.package_id != '~~not~found~~';'''
engine.execute(sql)
def update_tracking_solr(self, engine, start_date):
sql = '''SELECT package_id FROM tracking_summary
where package_id!='~~not~found~~'
and tracking_date >= %s;'''
results = engine.execute(sql, start_date)
package_ids = set()
for row in results:
package_ids.add(row['package_id'])
total = len(package_ids)
not_found = 0
print '%i package index%s to be rebuilt starting from %s' % (total, '' if total < 2 else 'es', start_date)
from ckan.lib.search import rebuild
for package_id in package_ids:
try:
rebuild(package_id)
except logic.NotFound:
print "Error: package %s not found." % (package_id)
not_found += 1
except KeyboardInterrupt:
print "Stopped."
return
except:
raise
print 'search index rebuilding done.' + (' %i not found.' % (not_found) if not_found else "")
class PluginInfo(CkanCommand):
'''Provide info on installed plugins.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 0
min_args = 0
def command(self):
self.get_info()
def get_info(self):
''' print info about current plugins from the .ini file'''
import ckan.plugins as p
self._load_config()
interfaces = {}
plugins = {}
for name in dir(p):
item = getattr(p, name)
try:
if issubclass(item, p.Interface):
interfaces[item] = {'class': item}
except TypeError:
pass
for interface in interfaces:
for plugin in p.PluginImplementations(interface):
name = plugin.name
if name not in plugins:
plugins[name] = {'doc': plugin.__doc__,
'class': plugin,
'implements': []}
plugins[name]['implements'].append(interface.__name__)
for plugin in plugins:
p = plugins[plugin]
print plugin + ':'
print '-' * (len(plugin) + 1)
if p['doc']:
print p['doc']
print 'Implements:'
for i in p['implements']:
extra = None
if i == 'ITemplateHelpers':
extra = self.template_helpers(p['class'])
if i == 'IActions':
extra = self.actions(p['class'])
print ' %s' % i
if extra:
print extra
print
def actions(self, cls):
''' Return readable action function info. '''
actions = cls.get_actions()
return self.function_info(actions)
def template_helpers(self, cls):
''' Return readable helper function info. '''
helpers = cls.get_helpers()
return self.function_info(helpers)
def function_info(self, functions):
''' Take a dict of functions and output readable info '''
import inspect
output = []
for function_name in functions:
fn = functions[function_name]
args_info = inspect.getargspec(fn)
params = args_info.args
num_params = len(params)
if args_info.varargs:
params.append('*' + args_info.varargs)
if args_info.keywords:
params.append('**' + args_info.keywords)
if args_info.defaults:
offset = num_params - len(args_info.defaults)
for i, v in enumerate(args_info.defaults):
params[i + offset] = params[i + offset] + '=' + repr(v)
# is this a classmethod if so remove the first parameter
if inspect.ismethod(fn) and inspect.isclass(fn.__self__):
params = params[1:]
params = ', '.join(params)
output.append(' %s(%s)' % (function_name, params))
# doc string
if fn.__doc__:
bits = fn.__doc__.split('\n')
for bit in bits:
output.append(' %s' % bit)
return ('\n').join(output)
class CreateTestDataCommand(CkanCommand):
'''Create test data in the database.
Tests can also delete the created objects easily with the delete() method.
create-test-data - annakarenina and warandpeace
create-test-data search - realistic data to test search
create-test-data gov - government style data
create-test-data family - package relationships data
create-test-data user - create a user 'tester' with api key 'tester'
create-test-data translations - annakarenina, warandpeace, and some test
translations of terms
    create-test-data vocabs - annakarenina, warandpeace, and some test
vocabularies
create-test-data hierarchy - hierarchy of groups
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
from ckan import plugins
from create_test_data import CreateTestData
if self.args:
cmd = self.args[0]
else:
cmd = 'basic'
if self.verbose:
print 'Creating %s test data' % cmd
if cmd == 'basic':
CreateTestData.create_basic_test_data()
elif cmd == 'user':
CreateTestData.create_test_user()
print 'Created user %r with password %r and apikey %r' % ('tester',
'tester', 'tester')
elif cmd == 'search':
CreateTestData.create_search_test_data()
elif cmd == 'gov':
CreateTestData.create_gov_test_data()
elif cmd == 'family':
CreateTestData.create_family_test_data()
elif cmd == 'translations':
CreateTestData.create_translations_test_data()
elif cmd == 'vocabs':
CreateTestData.create_vocabs_test_data()
elif cmd == 'hierarchy':
CreateTestData.create_group_hierarchy_test_data()
else:
print 'Command %s not recognized' % cmd
raise NotImplementedError
if self.verbose:
print 'Creating %s test data: Complete!' % cmd
class Profile(CkanCommand):
'''Code speed profiler
Provide a ckan url and it will make the request and record
how long each function call took in a file that can be read
by pstats.Stats (command-line) or runsnakerun (gui).
Usage:
profile URL [username]
e.g. profile /data/search
    The result is saved in ckan.data.search.profile
To view the profile in runsnakerun:
runsnakerun ckan.data.search.profile
You may need to install python module: cProfile
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 1
def _load_config_into_test_app(self):
from paste.deploy import loadapp
import paste.fixture
if not self.options.config:
msg = 'No config file supplied'
raise self.BadCommand(msg)
self.filename = os.path.abspath(self.options.config)
if not os.path.exists(self.filename):
raise AssertionError('Config filename %r does not exist.' % self.filename)
fileConfig(self.filename)
wsgiapp = loadapp('config:' + self.filename)
self.app = paste.fixture.TestApp(wsgiapp)
def command(self):
self._load_config_into_test_app()
import paste.fixture
import cProfile
import re
url = self.args[0]
if self.args[1:]:
user = self.args[1]
else:
user = 'visitor'
def profile_url(url):
try:
res = self.app.get(url, status=[200],
extra_environ={'REMOTE_USER': user})
except paste.fixture.AppError:
print 'App error: ', url.strip()
except KeyboardInterrupt:
raise
except Exception:
error(traceback.format_exc())
output_filename = 'ckan%s.profile' % re.sub('[/?]', '.', url.replace('/', '.'))
profile_command = "profile_url('%s')" % url
cProfile.runctx(profile_command, globals(), locals(), filename=output_filename)
import pstats
stats = pstats.Stats(output_filename)
stats.sort_stats('cumulative')
stats.print_stats(0.1) # show only top 10% of lines
print 'Only top 10% of lines shown'
print 'Written profile to: %s' % output_filename
class CreateColorSchemeCommand(CkanCommand):
'''Create or remove a color scheme.
After running this, you'll need to regenerate the css files. See paster's less command for details.
color - creates a random color scheme
color clear - clears any color scheme
    color <'HEX'> - use this as the base color, eg '#ff00ff' (must be quoted).
color <VALUE> - a float between 0.0 and 1.0 used as base hue
color <COLOR_NAME> - html color name used for base color eg lightblue
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
rules = [
'@layoutLinkColor',
'@mastheadBackgroundColor',
'@btnPrimaryBackground',
'@btnPrimaryBackgroundHighlight',
]
# list of predefined colors
color_list = {
        'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgrey': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'grey': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
        'indianred': '#cd5c5c',
        'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgray': '#d3d3d3',
'lightgrey': '#d3d3d3',
'lightgreen': '#90ee90',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
        'mediumpurple': '#9370db',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
        'palevioletred': '#db7093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32',
}
def create_colors(self, hue, num_colors=5, saturation=None, lightness=None):
if saturation is None:
saturation = 0.9
if lightness is None:
lightness = 40
else:
lightness *= 100
import math
saturation -= math.trunc(saturation)
print hue, saturation
import colorsys
        # Create n related colours
colors = []
for i in xrange(num_colors):
ix = i * (1.0/num_colors)
_lightness = (lightness + (ix * 40))/100.
if _lightness > 1.0:
_lightness = 1.0
color = colorsys.hls_to_rgb(hue, _lightness, saturation)
hex_color = '#'
for part in color:
hex_color += '%02x' % int(part * 255)
# check and remove any bad values
if not re.match('^\#[0-9a-f]{6}$', hex_color):
hex_color = '#FFFFFF'
colors.append(hex_color)
return colors
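    # Illustrative example: create_colors(0.6, num_colors=4) would return four
    # hex colours that share hue 0.6 (a blue-ish hue) with progressively
    # increasing lightness, ready to be written as the values for self.rules.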
def command(self):
hue = None
saturation = None
lightness = None
public = config.get(u'ckan.base_public_folder')
path = os.path.dirname(__file__)
path = os.path.join(path, '..', public, 'base', 'less', 'custom.less')
if self.args:
arg = self.args[0]
rgb = None
if arg == 'clear':
os.remove(path)
print 'custom colors removed.'
elif arg.startswith('#'):
color = arg[1:]
if len(color) == 3:
rgb = [int(x, 16) * 16 for x in color]
elif len(color) == 6:
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
print 'ERROR: invalid color'
elif arg.lower() in self.color_list:
color = self.color_list[arg.lower()][1:]
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
try:
hue = float(self.args[0])
except ValueError:
print 'ERROR argument `%s` not recognised' % arg
if rgb:
import colorsys
hue, lightness, saturation = colorsys.rgb_to_hls(*rgb)
lightness = lightness / 340
# deal with greys
if not (hue == 0.0 and saturation == 0.0):
saturation = None
else:
import random
hue = random.random()
if hue is not None:
f = open(path, 'w')
colors = self.create_colors(hue, saturation=saturation, lightness=lightness)
for i in xrange(len(self.rules)):
f.write('%s: %s;\n' % (self.rules[i], colors[i]))
print '%s: %s;\n' % (self.rules[i], colors[i])
            f.close()
print 'Color scheme has been created.'
print 'Make sure less is run for changes to take effect.'
class TranslationsCommand(CkanCommand):
'''Translation helper functions
trans js - generate the javascript translations
trans mangle - mangle the zh_TW translations for testing
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
from ckan.common import config
from ckan.lib.i18n import build_js_translations
ckan_path = os.path.join(os.path.dirname(__file__), '..')
self.i18n_path = config.get('ckan.i18n_directory',
os.path.join(ckan_path, 'i18n'))
command = self.args[0]
if command == 'mangle':
self.mangle_po()
elif command == 'js':
build_js_translations()
else:
            print 'Command %s not recognized' % command
def mangle_po(self):
''' This will mangle the zh_TW translations for translation coverage
testing.
        NOTE: This will destroy the current translations for zh_TW
'''
import polib
pot_path = os.path.join(self.i18n_path, 'ckan.pot')
po = polib.pofile(pot_path)
# we don't want to mangle the following items in strings
# %(...)s %s %0.3f %1$s %2$0.3f [1:...] {...} etc
# sprintf bit after %
spf_reg_ex = "\+?(0|'.)?-?\d*(.\d*)?[\%bcdeufosxX]"
extract_reg_ex = '(\%\([^\)]*\)' + spf_reg_ex + \
'|\[\d*\:[^\]]*\]' + \
'|\{[^\}]*\}' + \
'|<[^>}]*>' + \
'|\%((\d)*\$)?' + spf_reg_ex + ')'
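        # Illustrative effect: every character outside a placeholder is turned
        # into '-', while placeholders such as %(title)s, {0} or [1:xxx] are
        # copied through unchanged, so interpolation still works after mangling.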
for entry in po:
msg = entry.msgid.encode('utf-8')
matches = re.finditer(extract_reg_ex, msg)
length = len(msg)
position = 0
translation = u''
for match in matches:
translation += '-' * (match.start() - position)
position = match.end()
translation += match.group(0)
translation += '-' * (length - position)
entry.msgstr = translation
out_dir = os.path.join(self.i18n_path, 'zh_TW', 'LC_MESSAGES')
try:
os.makedirs(out_dir)
except OSError:
pass
po.metadata['Plural-Forms'] = "nplurals=1; plural=0\n"
out_po = os.path.join(out_dir, 'ckan.po')
out_mo = os.path.join(out_dir, 'ckan.mo')
po.save(out_po)
po.save_as_mofile(out_mo)
print 'zh_TW has been mangled'
class MinifyCommand(CkanCommand):
'''Create minified versions of the given Javascript and CSS files.
Usage:
paster minify [--clean] PATH
for example:
paster minify ckan/public/base
paster minify ckan/public/base/css/*.css
paster minify ckan/public/base/css/red.css
if the --clean option is provided any minified files will be removed.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
exclude_dirs = ['vendor']
def __init__(self, name):
super(MinifyCommand, self).__init__(name)
self.parser.add_option('--clean', dest='clean',
action='store_true', default=False,
help='remove any minified files in the path')
def command(self):
clean = getattr(self.options, 'clean', False)
self._load_config()
for base_path in self.args:
if os.path.isfile(base_path):
if clean:
self.clear_minifyed(base_path)
else:
self.minify_file(base_path)
elif os.path.isdir(base_path):
for root, dirs, files in os.walk(base_path):
dirs[:] = [d for d in dirs if not d in self.exclude_dirs]
for filename in files:
path = os.path.join(root, filename)
if clean:
self.clear_minifyed(path)
else:
self.minify_file(path)
else:
                # Path is neither a file nor a dir, so skip it
continue
def clear_minifyed(self, path):
path_only, extension = os.path.splitext(path)
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
if path_only.endswith('.min'):
print 'removing %s' % path
os.remove(path)
def minify_file(self, path):
'''Create the minified version of the given file.
If the file is not a .js or .css file (e.g. it's a .min.js or .min.css
file, or it's some other type of file entirely) it will not be
        minified.
:param path: The path to the .js or .css file to minify
'''
import ckan.lib.fanstatic_resources as fanstatic_resources
path_only, extension = os.path.splitext(path)
if path_only.endswith('.min'):
# This is already a minified file.
return
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
path_min = fanstatic_resources.min_path(path)
source = open(path, 'r').read()
f = open(path_min, 'w')
if path.endswith('.css'):
f.write(rcssmin.cssmin(source))
elif path.endswith('.js'):
f.write(rjsmin.jsmin(source))
f.close()
print "Minified file '{0}'".format(path)
class LessCommand(CkanCommand):
'''Compile all root less documents into their CSS counterparts
Usage:
paster less
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self._load_config()
self.less()
custom_css = {
'fuchsia': '''
@layoutLinkColor: #E73892;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'green': '''
@layoutLinkColor: #2F9B45;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'red': '''
@layoutLinkColor: #C14531;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'maroon': '''
@layoutLinkColor: #810606;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
}
def less(self):
''' Compile less files '''
import subprocess
command = 'npm bin'
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
directory = output[0].strip()
less_bin = os.path.join(directory, 'lessc')
public = config.get(u'ckan.base_public_folder')
root = os.path.join(os.path.dirname(__file__), '..', public, 'base')
root = os.path.abspath(root)
custom_less = os.path.join(root, 'less', 'custom.less')
for color in self.custom_css:
f = open(custom_less, 'w')
f.write(self.custom_css[color])
f.close()
self.compile_less(root, less_bin, color)
f = open(custom_less, 'w')
f.write('// This file is needed in order for ./bin/less to compile in less 1.3.1+\n')
f.close()
self.compile_less(root, less_bin, 'main')
def compile_less(self, root, less_bin, color):
print 'compile %s.css' % color
import subprocess
main_less = os.path.join(root, 'less', 'main.less')
main_css = os.path.join(root, 'css', '%s.css' % color)
command = '%s %s %s' % (less_bin, main_less, main_css)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
print output
class FrontEndBuildCommand(CkanCommand):
'''Creates and minifies css and JavaScript files
Usage:
paster front-end-build
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self._load_config()
# Less css
cmd = LessCommand('less')
cmd.options = self.options
cmd.command()
# js translation strings
cmd = TranslationsCommand('trans')
cmd.options = self.options
cmd.args = ('js',)
cmd.command()
# minification
cmd = MinifyCommand('minify')
cmd.options = self.options
public = config.get(u'ckan.base_public_folder')
root = os.path.join(os.path.dirname(__file__), '..', public, 'base')
root = os.path.abspath(root)
ckanext = os.path.join(os.path.dirname(__file__), '..', '..', 'ckanext')
ckanext = os.path.abspath(ckanext)
cmd.args = (root, ckanext)
cmd.command()
class ViewsCommand(CkanCommand):
'''Manage resource views.
Usage:
paster views create [options] [type1] [type2] ...
Create views on relevant resources. You can optionally provide
specific view types (eg `recline_view`, `image_view`). If no types
are provided, the default ones will be used. These are generally
the ones defined in the `ckan.views.default_views` config option.
    Note that in either case, plugins must be loaded (ie added to
`ckan.plugins`), otherwise the command will stop.
paster views clear [options] [type1] [type2] ...
Permanently delete all views or the ones with the provided types.
paster views clean
Permanently delete views for all types no longer present in the
`ckan.plugins` configuration option.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
def __init__(self, name):
super(ViewsCommand, self).__init__(name)
self.parser.add_option('-y', '--yes', dest='assume_yes',
action='store_true',
default=False,
help='''Automatic yes to prompts. Assume "yes"
as answer to all prompts and run non-interactively''')
self.parser.add_option('-d', '--dataset', dest='dataset_id',
action='append',
help='''Create views on a particular dataset.
You can use the dataset id or name, and it can be defined multiple times.''')
self.parser.add_option('--no-default-filters',
dest='no_default_filters',
action='store_true',
default=False,
help='''Do not add default filters for relevant
resource formats for the view types provided. Note that filters are not added
by default anyway if an unsupported view type is provided or when using the
`-s` or `-d` options.''')
self.parser.add_option('-s', '--search', dest='search_params',
action='store',
default=False,
help='''Extra search parameters that will be
used for getting the datasets to create the resource views on. It must be a
JSON object like the one used by the `package_search` API call. Supported
fields are `q`, `fq` and `fq_list`. Check the documentation for examples.
Not used when using the `-d` option.''')
def command(self):
self._load_config()
if not self.args:
print self.usage
elif self.args[0] == 'create':
view_plugin_types = self.args[1:]
self.create_views(view_plugin_types)
elif self.args[0] == 'clear':
view_plugin_types = self.args[1:]
self.clear_views(view_plugin_types)
elif self.args[0] == 'clean':
self.clean_views()
else:
print self.usage
_page_size = 100
def _get_view_plugins(self, view_plugin_types,
get_datastore_views=False):
'''
        Returns the view plugins that were successfully loaded
Views are provided as a list of ``view_plugin_types``. If no types are
provided, the default views defined in the ``ckan.views.default_views``
will be created. Only in this case (when the default view plugins are
        used) can the `get_datastore_views` parameter be used to also include
        view plugins that require data to be in the DataStore.
If any of the provided plugins could not be loaded (eg it was not added
to `ckan.plugins`) the command will stop.
Returns a list of loaded plugin names.
'''
from ckan.lib.datapreview import (get_view_plugins,
get_default_view_plugins
)
log = logging.getLogger(__name__)
view_plugins = []
if not view_plugin_types:
log.info('No view types provided, using default types')
view_plugins = get_default_view_plugins()
if get_datastore_views:
view_plugins.extend(
get_default_view_plugins(get_datastore_views=True))
else:
view_plugins = get_view_plugins(view_plugin_types)
loaded_view_plugins = [view_plugin.info()['name']
for view_plugin in view_plugins]
plugins_not_found = list(set(view_plugin_types) -
set(loaded_view_plugins))
if plugins_not_found:
error('View plugin(s) not found : {0}. '.format(plugins_not_found)
+ 'Have they been added to the `ckan.plugins` configuration'
+ ' option?')
return loaded_view_plugins
def _add_default_filters(self, search_data_dict, view_types):
'''
Adds extra filters to the `package_search` dict for common view types
It basically adds `fq` parameters that filter relevant resource formats
for the view types provided. For instance, if one of the view types is
`pdf_view` the following will be added to the final query:
fq=res_format:"pdf" OR res_format:"PDF"
This obviously should only be used if all view types are known and can
be filtered, otherwise we want all datasets to be returned. If a
non-filterable view type is provided, the search params are not
modified.
Returns the provided data_dict for `package_search`, optionally
modified with extra filters.
'''
from ckanext.imageview.plugin import DEFAULT_IMAGE_FORMATS
from ckanext.textview.plugin import get_formats as get_text_formats
from ckanext.datapusher.plugin import DEFAULT_FORMATS as \
datapusher_formats
filter_formats = []
for view_type in view_types:
if view_type == 'image_view':
for _format in DEFAULT_IMAGE_FORMATS:
filter_formats.extend([_format, _format.upper()])
elif view_type == 'text_view':
formats = get_text_formats(config)
for _format in itertools.chain.from_iterable(formats.values()):
filter_formats.extend([_format, _format.upper()])
elif view_type == 'pdf_view':
filter_formats.extend(['pdf', 'PDF'])
elif view_type in ['recline_view', 'recline_grid_view',
'recline_graph_view', 'recline_map_view']:
if datapusher_formats[0] in filter_formats:
continue
for _format in datapusher_formats:
if '/' not in _format:
filter_formats.extend([_format, _format.upper()])
else:
# There is another view type provided so we can't add any
# filter
return search_data_dict
filter_formats_query = ['+res_format:"{0}"'.format(_format)
for _format in filter_formats]
search_data_dict['fq_list'].append(' OR '.join(filter_formats_query))
return search_data_dict
def _update_search_params(self, search_data_dict):
'''
Update the `package_search` data dict with the user provided parameters
Supported fields are `q`, `fq` and `fq_list`.
If the provided JSON object can not be parsed the process stops with
an error.
Returns the updated data dict
'''
log = logging.getLogger(__name__)
if not self.options.search_params:
return search_data_dict
try:
user_search_params = json.loads(self.options.search_params)
except ValueError, e:
error('Unable to parse JSON search parameters: {0}'.format(e))
if user_search_params.get('q'):
search_data_dict['q'] = user_search_params['q']
if user_search_params.get('fq'):
if search_data_dict['fq']:
search_data_dict['fq'] += ' ' + user_search_params['fq']
else:
search_data_dict['fq'] = user_search_params['fq']
if (user_search_params.get('fq_list') and
isinstance(user_search_params['fq_list'], list)):
search_data_dict['fq_list'].extend(user_search_params['fq_list'])
def _search_datasets(self, page=1, view_types=[]):
'''
Perform a query with `package_search` and return the result
Results can be paginated using the `page` parameter
'''
n = self._page_size
search_data_dict = {
'q': '',
'fq': '',
'fq_list': [],
'include_private': True,
'rows': n,
'start': n * (page - 1),
}
if self.options.dataset_id:
search_data_dict['q'] = ' OR '.join(
['id:{0} OR name:"{0}"'.format(dataset_id)
for dataset_id in self.options.dataset_id]
)
elif self.options.search_params:
self._update_search_params(search_data_dict)
elif not self.options.no_default_filters:
self._add_default_filters(search_data_dict, view_types)
if not search_data_dict.get('q'):
search_data_dict['q'] = '*:*'
query = p.toolkit.get_action('package_search')(
{}, search_data_dict)
return query
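    # _search_datasets() is called repeatedly with increasing `page` values by
    # create_views() below, fetching `_page_size` datasets per call until a
    # page comes back with fewer results than the page size.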
def create_views(self, view_plugin_types=[]):
from ckan.lib.datapreview import add_views_to_dataset_resources
log = logging.getLogger(__name__)
datastore_enabled = 'datastore' in config['ckan.plugins'].split()
loaded_view_plugins = self._get_view_plugins(view_plugin_types,
datastore_enabled)
context = {'user': self.site_user['name']}
page = 1
while True:
query = self._search_datasets(page, loaded_view_plugins)
if page == 1 and query['count'] == 0:
error('No datasets to create resource views on, exiting...')
elif page == 1 and not self.options.assume_yes:
msg = ('\nYou are about to check {0} datasets for the ' +
'following view plugins: {1}\n' +
' Do you want to continue?')
confirm = query_yes_no(msg.format(query['count'],
loaded_view_plugins))
if confirm == 'no':
error('Command aborted by user')
if query['results']:
for dataset_dict in query['results']:
if not dataset_dict.get('resources'):
continue
views = add_views_to_dataset_resources(
context,
dataset_dict,
view_types=loaded_view_plugins)
if views:
view_types = list(set([view['view_type']
for view in views]))
msg = ('Added {0} view(s) of type(s) {1} to ' +
'resources from dataset {2}')
log.debug(msg.format(len(views),
', '.join(view_types),
dataset_dict['name']))
if len(query['results']) < self._page_size:
break
page += 1
else:
break
log.info('Done')
def clear_views(self, view_plugin_types=[]):
log = logging.getLogger(__name__)
if not self.options.assume_yes:
if view_plugin_types:
msg = 'Are you sure you want to delete all resource views ' + \
'of type {0}?'.format(', '.join(view_plugin_types))
else:
msg = 'Are you sure you want to delete all resource views?'
result = query_yes_no(msg, default='no')
if result == 'no':
error('Command aborted by user')
context = {'user': self.site_user['name']}
logic.get_action('resource_view_clear')(
context, {'view_types': view_plugin_types})
log.info('Done')
def clean_views(self):
names = []
for plugin in p.PluginImplementations(p.IResourceView):
names.append(str(plugin.info()['name']))
results = model.ResourceView.get_count_not_in_view_types(names)
if not results:
print 'No resource views to delete'
return
print 'This command will delete the following resource views:\n'
for row in results:
print '%s of type %s' % (row[1], row[0])
result = query_yes_no('Do you want to delete these resource views?', default='no')
if result == 'no':
print 'Not Deleting.'
return
model.ResourceView.delete_not_in_view_types(names)
model.Session.commit()
print 'Deleted resource views.'
class ConfigToolCommand(paste.script.command.Command):
'''Tool for editing options in a CKAN config file
paster config-tool <default.ini> <key>=<value> [<key>=<value> ...]
paster config-tool <default.ini> -f <custom_options.ini>
Examples:
paster config-tool default.ini sqlalchemy.url=123 'ckan.site_title=ABC'
paster config-tool default.ini -s server:main -e port=8080
paster config-tool default.ini -f custom_options.ini
'''
parser = paste.script.command.Command.standard_parser(verbose=True)
default_verbosity = 1
group_name = 'ckan'
usage = __doc__
summary = usage.split('\n')[0]
parser.add_option('-s', '--section', dest='section',
default='app:main', help='Section of the config file')
parser.add_option(
'-e', '--edit', action='store_true', dest='edit', default=False,
help='Check that the option already exists in the config file')
parser.add_option(
'-f', '--file', dest='merge_filepath', metavar='FILE',
help='Supply an options file to merge in')
def command(self):
import config_tool
if len(self.args) < 1:
self.parser.error('Not enough arguments (got %i, need at least 1)'
% len(self.args))
config_filepath = self.args[0]
if not os.path.exists(config_filepath):
self.parser.error('Config filename %r does not exist.' %
config_filepath)
if self.options.merge_filepath:
config_tool.config_edit_using_merge_file(
config_filepath, self.options.merge_filepath)
options = self.args[1:]
if not (options or self.options.merge_filepath):
self.parser.error('No options provided')
if options:
for option in options:
if '=' not in option:
error(
'An option does not have an equals sign: %r '
'It should be \'key=value\'. If there are spaces '
'you\'ll need to quote the option.\n' % option)
try:
config_tool.config_edit_using_option_strings(
config_filepath, options, self.options.section,
edit=self.options.edit)
except config_tool.ConfigToolError, e:
error(traceback.format_exc())
class JobsCommand(CkanCommand):
'''Manage background jobs
Usage:
paster jobs worker [--burst] [QUEUES]
Start a worker that fetches jobs from queues and executes
them. If no queue names are given then the worker listens
to the default queue; this is equivalent to
paster jobs worker default
If queue names are given then the worker listens to those
queues and only those:
paster jobs worker my-custom-queue
Hence, if you want the worker to listen to the default queue
and some others then you must list the default queue explicitly:
paster jobs worker default my-custom-queue
If the `--burst` option is given then the worker will exit
as soon as all its queues are empty.
paster jobs list [QUEUES]
List currently enqueued jobs from the given queues. If no queue
names are given then the jobs from all queues are listed.
paster jobs show ID
Show details about a specific job.
paster jobs cancel ID
Cancel a specific job. Jobs can only be canceled while they are
enqueued. Once a worker has started executing a job it cannot
be aborted anymore.
paster jobs clear [QUEUES]
Cancel all jobs on the given queues. If no queue names are
given then ALL queues are cleared.
paster jobs test [QUEUES]
Enqueue a test job. If no queue names are given then the job is
added to the default queue. If queue names are given then a
separate test job is added to each of the queues.
'''
summary = __doc__.split(u'\n')[0]
usage = __doc__
min_args = 0
def __init__(self, *args, **kwargs):
super(JobsCommand, self).__init__(*args, **kwargs)
try:
self.parser.add_option(u'--burst', action='store_true',
default=False,
help=u'Start worker in burst mode.')
except OptionConflictError:
# Option has already been added in previous call
pass
def command(self):
self._load_config()
try:
cmd = self.args.pop(0)
except IndexError:
print(self.__doc__)
sys.exit(0)
if cmd == u'worker':
self.worker()
elif cmd == u'list':
self.list()
elif cmd == u'show':
self.show()
elif cmd == u'cancel':
self.cancel()
elif cmd == u'clear':
self.clear()
elif cmd == u'test':
self.test()
else:
error(u'Unknown command "{}"'.format(cmd))
def worker(self):
from ckan.lib.jobs import Worker
Worker(self.args).work(burst=self.options.burst)
def list(self):
data_dict = {
u'queues': self.args,
}
jobs = p.toolkit.get_action(u'job_list')({}, data_dict)
for job in jobs:
if job[u'title'] is None:
job[u'title'] = ''
else:
job[u'title'] = u'"{}"'.format(job[u'title'])
print(u'{created} {id} {queue} {title}'.format(**job))
def show(self):
if not self.args:
error(u'You must specify a job ID')
id = self.args[0]
try:
job = p.toolkit.get_action(u'job_show')({}, {u'id': id})
except logic.NotFound:
error(u'There is no job with ID "{}"'.format(id))
print(u'ID: {}'.format(job[u'id']))
if job[u'title'] is None:
title = u'None'
else:
title = u'"{}"'.format(job[u'title'])
print(u'Title: {}'.format(title))
print(u'Created: {}'.format(job[u'created']))
print(u'Queue: {}'.format(job[u'queue']))
def cancel(self):
if not self.args:
error(u'You must specify a job ID')
id = self.args[0]
try:
p.toolkit.get_action(u'job_cancel')({}, {u'id': id})
except logic.NotFound:
error(u'There is no job with ID "{}"'.format(id))
print(u'Cancelled job {}'.format(id))
def clear(self):
data_dict = {
u'queues': self.args,
}
queues = p.toolkit.get_action(u'job_clear')({}, data_dict)
queues = (u'"{}"'.format(q) for q in queues)
print(u'Cleared queue(s) {}'.format(u', '.join(queues)))
def test(self):
from ckan.lib.jobs import DEFAULT_QUEUE_NAME, enqueue, test_job
for queue in (self.args or [DEFAULT_QUEUE_NAME]):
job = enqueue(test_job, [u'A test job'], title=u'A test job', queue=queue)
print(u'Added test job {} to queue "{}"'.format(job.id, queue))
|
pyunit_h2oshutdown_DEPRECATED.py
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
import threading
from h2o.utils.typechecks import assert_is_type
def h2oshutdown():
"""
Python API test: h2o.shutdown(prompt=False)
Deprecated, use h2o.cluster().shutdown()
"""
try:
bthread = threading.Thread(target=call_badshutdown())
bthread.daemon=True
bthread.start()
bthread.join(1.0)
except Exception as e:
print("*** Error in thread is caught=> ")
print(e) # if we see this warning message, the error is caught correctly
assert_is_type(e, TypeError)
assert "badparam" in e.args[0], "h2o.shutdown() command is not working."
try:
thread = threading.Thread(target=call_shutdown)
thread.daemon =True
thread.start()
thread.join(1.0)
except Exception as e:
assert False, "h2o.shutdown() command is not working."
def call_shutdown():
h2o.shutdown(prompt=True) # call shutdown but do not actually shut anything down.
def call_badshutdown(): # added this test per Pasha request. Want to see error from one thread will pass on exception
h2o.shutdown(badparam=1, prompt=True)
if __name__ == "__main__":
pyunit_utils.standalone_test(h2oshutdown)
else:
h2oshutdown()
|
multibot_testing.py
|
import template_tool_testing
import template_ghost_bot
from threading import Thread
"""
Each value in the bots dict is like so:
token: Discord token.
is_bot: If the token is for a bot account or user account.
client: Function to call to initialize our bot's client and execute its code
"""
bots = {
"DarkElement":("token", False, template_tool_testing.main)
}
for bot in bots.keys():
"""
Spawn a new thread for this bot.
"""
token, is_bot, client = bots[bot]
Thread(target=client, args=(token, is_bot)).start()
|
kegdata.py
|
#!/usr/bin/python
# coding: UTF-8
# kegdata service to read and report keg status
# Written by: Ron Ritchey
import json, threading, logging, queue, time, getopt, sys
import RPi.GPIO as GPIO
from .hx711 import HX711
# HOW TO CALCULATE THE REFERENCE UNIT
# Set the reference unit to 1, then put a known mass on the sensor (e.g. 1 kg, or anything whose weight you know exactly).
# With a reference unit of 1 this sensor read close to 0 with no weight
# and around 184000 counts with 2 kg applied. So, by simple proportion:
# if 2000 grams gives 184000 counts, then 1 gram gives 184000 / 2000 = 92 counts, i.e. 92 is the reference unit for grams.
#hx.set_reference_unit(1)
#hx.set_reference_unit(92)
#hx.set_reference_unit(10772)
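# A minimal calibration sketch following the proportion above (illustrative only,
# not part of the original service; it assumes an HX711 instance `hx` and a known
# 1000 g calibration mass):
# hx.set_reference_unit(1) # report raw counts
# raw = hx.get_weight(10) # average of 10 raw readings with the mass applied
# hx.set_reference_unit(int(raw / 1000)) # counts per gram, e.g. ~673 as used below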
class kegdata():
kegdata_init = {
'name':"Sharon's Stout",
'description':'Rich Chocolate and Coffee Flavor',
'ABV':7.5,
'IBU':23,
'weight':320
}
varcheck = {
'unicode':
[
'name',
'description',
],
'int':
[
'weight',
],
'float':
[
'ABV',
'IBU',
]
}
def __init__(self, q):
self.dataqueue = q
self.kegdata = self.kegdata_init
self.kegdata_prev = { }
print("Initializing keg data service")
self.hx = HX711(4,17)
self.hx.set_reading_format("LSB", "MSB")
self.hx.set_reference_unit(673)
self.hx.reset()
self.hx.tare()
# Now set up a thread to listen to the channel and update our data when
# the channel indicates a relevant key has changed
data_t = threading.Thread(target=self.run)
data_t.daemon = True
data_t.start()
# self.server = server
# self.port = port
# self.pwd = pwd
# self.connection_failed = 0
#
# self.dataclient = None
# Now set up a thread to listen to the channel and update our data when
# the channel indicates a relevant key has changed
# data_t = threading.Thread(target=self.run)
# data_t.daemon = True
# data_t.start()
def validatekegvars(self, vars):
for vtype, members in self.varcheck.items():
if vtype == 'unicode':
for v in members:
try:
if type(vars[v]) is str:
continue
if vars[v] is None:
vars[v] = ""
elif type(vars[v]) is bytes:
logging.debug("Received bytes in {0}. Decoding to str".format(v))
vars[v] = vars[v].decode()
else:
# This happens so often when playing from webradio that I'm disabling logging for now.
# logging.debug(u"Received non-string type {0} in {1}. Converting to null".format(type(vars[v]),v))
vars[v] = ""
except KeyError:
logging.debug("Missing required value {0}. Adding empty version".format(v))
vars[v] = ""
elif vtype == 'bool':
for v in members:
try:
if type(vars[v]) is bool:
continue
if vars[v] is None:
vars[v] = False
elif type(vars[v]) is int:
logging.debug("Received integer in {0}. Converting to boolean".format(v))
vars[v] = bool(vars[v])
else:
logging.debug("Received non-bool type {0} in {1}. Converting to False".format(type(vars[v]),v))
vars[v] = False
except KeyError:
logging.debug("Missing required value {0}. Adding empty version".format(v))
vars[v] = False
elif vtype == 'int':
for v in members:
try:
if type(vars[v]) is int:
continue
if vars[v] is None:
vars[v] = 0
elif type(vars[v]) is bool:
logging.debug("Received boolean in {0}. Converting to integer".format(v))
vars[v] = int(vars[v])
else:
logging.debug("Received non-integer type {0} in {1}. Converting to 0".format(type(vars[v]),v))
vars[v] = 0
except KeyError:
logging.debug("Missing required value {0}. Adding empty version".format(v))
vars[v] = 0
# def connect(self):
#
# # Try up to 10 times to connect to REDIS
# self.connection_failed = 0
#
# logging.debug(u"Connecting to Rune Redis service on {0}:{1}".format(self.server, self.port))
#
# while True:
# if self.connection_failed >= 10:
# logging.debug(u"Could not connect to Rune Redis service")
# raise RuntimeError(u"Could not connect to Rune Redis service")
# try:
# # Connection to REDIS
# client = redis.StrictRedis(self.server, self.port, self.pwd)
#
# # Configure REDIS to send keyspace messages for set events
# client.config_set(u'notify-keyspace-events', u'KEA')
# self.dataclient = client
# logging.debug(u"Connected to Rune Redis service")
# break
# except:
# self.dataclient = None
# self.connection_failed += 1
# time.sleep(1)
#
#
# def subscribe(self):
# # Try to subscribe. If you fail, reconnect and try again.
# # If you fail, allow the resulting exception to be passed on.
#
# try:
# # Create a pubsub to receive messages
# self.pubsub = self.dataclient.pubsub(ignore_subscribe_messages=True)
#
# # Subscribe to act_player_info keyspace events
# self.pubsub.psubscribe(u'__key*__:act_player_info')
# except redis.ConnectionError:
# self.connect()
#
# # Try again to subscribe
# # Create a pubsub to receive messages
# self.pubsub = self.dataclient.pubsub(ignore_subscribe_messages=True)
#
# # Subscribe to act_player_info keyspace events
# self.pubsub.subscribe(u'__key*__:act_player_info')
def run(self):
logging.debug("kegdata service starting")
while True:
# if self.dataclient is None:
# try:
# # Try to connect
# self.connect()
# self.subscribe()
# self.status()
# self.sendUpdate()
# except (redis.ConnectionError, RuntimeError):
# self.dataclient = None
# # On connection error, sleep 5 and then return to top and try again
# time.sleep(5)
# continue
# try:
# # Wait for notice that key has changed
# msg = self.pubsub.get_message()
# if msg:
# # act_player_info key event occured
# self.status()
# self.sendUpdate()
# time.sleep(.01)
# except (redis.ConnectionError, RuntimeError):
# # if we lose our connection while trying to query DB
# # sleep 5 and then return to top to try again
# self.dataclient = None
# logging.debug(u"Could not get status from Rune Redis service")
# time.sleep(5)
# continue
self.status()
self.sendUpdate()
time.sleep(5)
def status(self):
# Read kegplayer status and update kegdata
# Update keg variables
self.kegdata['name'] = "Sharon's Stout"
self.kegdata['description'] = "Rich Chocolate and Coffee Flavor"
self.kegdata['ABV'] = 7.5
self.kegdata['IBU'] = 23
self.kegdata['weight'] = int(self.hx.get_weight(10))
print("Weight is {0} in oz".format(self.kegdata['weight']))
self.hx.power_down()
self.hx.power_up()
self.validatekegvars(self.kegdata)
def sendUpdate(self):
# Figure out what has changed and then send just those values across dataqueue
md = { }
for k, v in self.kegdata.items():
pv = self.kegdata_prev[k] if k in self.kegdata_prev else None
if pv != v:
md[k] = v
# Send md to queue if anything has changed
if len(md) > 0:
# # elapsed is special as it needs to be sent to guarantee that the timer gets updated correctly. Even if it hasn't changed, send it anyway
# md[u'elapsed'] = self.kegdata[u'elapsed']
self.dataqueue.put(md)
# Update kegdata_prev
self.kegdata_prev = self.kegdata.copy()
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', filename='kegdata.log', level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler())
# try:
# opts, args = getopt.getopt(sys.argv[1:],u"hs:p:w:",[u"server=",u"port=",u"pwd="])
# except getopt.GetoptError:
# print u'kegdata_rune.py -s <server> -p <port> -w <password>'
# sys.exit(2)
# Set defaults
# server = u'localhost'
# port = 6379
# pwd= u''
# for opt, arg in opts:
# if opt == u'-h':
# print u'kegdata_rune.py -s <server> -p <port> -w <password>'
# sys.exit()
# elif opt in (u"-s", u"--server"):
# server = arg
# elif opt in (u"-p", u"--port"):
# port = arg
# elif opt in (u"-w", u"--pwd"):
# pwd = arg
import sys
q = queue.Queue()
kd = kegdata(q)
try:
start = time.time()
while True:
if start+120 < time.time():
break
try:
item = q.get(timeout=1000)
print("++++++++++")
for k,v in item.items():
print("[{0}] '{1}' type {2}".format(k,v,type(v)))
print("++++++++++")
print()
q.task_done()
except queue.Empty:
pass
except KeyboardInterrupt:
print('')
pass
print("Exiting...")
|
compat.py
|
import inspect
import threading
import logging
import sys
from xml.etree import cElementTree as ET
import os
if sys.version_info < (3,):
# python 2
import Queue as queue
else:
# python 3
import queue
if sys.version_info < (3,):
# python 2
str_ = basestring
def clean_repr(x):
if isinstance(x, unicode):
return repr(x)[1:]
else:
return repr(x)
else:
# python 3
str_ = str
clean_repr = repr
if sys.version_info < (3,):
# python 2
long_ = long # pylint:disable=invalid-name
else:
# python 3
long_ = int # pylint:disable=invalid-name
try:
# ruamel exists, use this OrderedDict as it is faster
from ruamel.ordereddict import ordereddict as OrderedDict
except ImportError:
# Fallback to slower collections one
from collections import OrderedDict
def get_profiler_dir():
return os.environ.get("PYMALCOLM_PROFILER_DIR", "/tmp/imalcolm_profiles")
def get_stack_size():
return int(os.environ.get("PYMALCOLM_STACK_SIZE", "0"))
def getargspec(f):
if sys.version_info < (3,):
args, varargs, keywords, defaults = inspect.getargspec(f)
else:
# Need to use fullargspec in case there are annotations
args, varargs, keywords, defaults = inspect.getfullargspec(f)[:4]
return inspect.ArgSpec(args, varargs, keywords, defaults)
def et_to_string(element):
# type: (ET.Element) -> str
xml = '<?xml version="1.0" ?>'
try:
xml += ET.tostring(element, encoding="unicode")
except LookupError:
xml += ET.tostring(element)
return xml
# Exception handling from future.utils
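# The Python 2 branch below is wrapped in exec() because the three-argument
# "raise exc, None, traceback" statement is a syntax error under Python 3,
# so it has to be hidden from the Python 3 parser at compile time.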
if sys.version_info < (3,):
exec('''
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc, None, traceback
''')
else:
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
try:
# Python2
from thread import get_ident as get_thread_ident
except ImportError:
# Python3
from threading import get_ident as get_thread_ident
try:
# Python3
from logging import QueueHandler
except ImportError:
# Python2
class QueueHandler(logging.Handler):
"""Cut down version of the QueueHandler in Python3"""
def __init__(self, queue):
logging.Handler.__init__(self)
self.queue = queue
def prepare(self, record):
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
try:
self.queue.put_nowait(self.prepare(record))
except Exception:
self.handleError(record)
if sys.version_info < (3, 5):
# Python2 and old Python3 without respect_handler_level
class QueueListener(object):
"""Cut down version of the QueueHandler in Python3.5"""
_sentinel = None
def __init__(self, queue, *handlers, **kwargs):
self.queue = queue
self.handlers = handlers
self._thread = None
def start(self):
self._thread = threading.Thread(target=self._monitor)
self._thread.daemon = True
self._thread.start()
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
for handler in self.handlers:
if record.levelno >= handler.level:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while True:
record = q.get(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
def stop(self):
self.queue.put_nowait(self._sentinel)
self._thread.join()
self._thread = None
else:
# Python3.5 introduced respect_handler_level
from logging.handlers import QueueListener
|
lsbook.py
|
import json
import logging
import os
from concurrent.futures.process import ProcessPoolExecutor
from threading import Thread
from urllib import request
from LsBook.utils.fs import copytree, rmdir
from . import __version__
from .models.book import Book
from .output.generateBook import generateBook
from .utils.argument import cmd_argument
from .utils.logger import log_init
from .utils.path import get_pure_path
msg = None
def query_version():
try:
global msg
r = request.urlopen('https://pypi.org/pypi/lsbook/json', timeout=2)
version = json.loads(r.read().decode('utf-8')).get("info").get("version")
for x, y in zip(version.split("."), __version__.split(".")):
if int(x) < int(y):
break
elif int(x) == int(y):
continue
msg = f"\n当前版本:{__version__}\t已发布最新版本:{version}\n请使用命令\t'pip install -U lsbook'\t升级"
except:
pass
def main(debug=False):
th = Thread(target=query_version)
if not debug:
th.start()
args = cmd_argument()
build: bool = args.build
book_path: str = args.book
book_output: str = args.output
log_level: str = args.log
base_assets = args.base_assets
assets = args.assets
log_init(log_level)
logging.debug(f"入参:{args}")
try:
if build:
logging.info("开始生成书籍")
if debug:
pool = ProcessPoolExecutor(1)
else:
pool = ProcessPoolExecutor()
book = Book(book_path, book_output, pool, base_assets)
# Generate the book
generateBook(book)
elif assets:
out = get_pure_path(assets, "lsbook")
logging.info(f"释放资源:{out}")
rmdir(out)
copytree(get_pure_path(os.path.dirname(__file__), "assets", "lsbook"), out)
logging.info(f"释放资源完毕")
else:
logging.warning("lsbook 查看帮助")
finally:
if not debug:
th.join()
if msg:
logging.warning(msg)
if __name__ == '__main__':
main()
|
ble_scanner.py
|
#!/usr/bin/env python
import datetime
import requests
import sys
import time
from subprocess import PIPE, Popen
from threading import Thread
from Queue import Queue, Empty
SCANNER_NAME = 'SCANNER0'
ON_POSIX = 'posix' in sys.builtin_module_names
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
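# `hcitool lescan --duplicate` streams discovered advertisements line by line on
# stdout; a daemon thread drains that pipe into a Queue so the main loop below
# can poll with get_nowait() instead of blocking on readline().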
p = Popen(['hcitool', 'lescan', '--duplicate'], stdout=PIPE, bufsize=1, close_fds=ON_POSIX)
q = Queue()
t = Thread(target=enqueue_output, args=(p.stdout, q))
t.daemon = True # thread dies with the program
t.start()
def post_scan(mac_address, device_name):
data = {
'type': 'ble',
'rssi': -100,
'macAddress': mac_address,
'deviceName': device_name,
'source': SCANNER_NAME,
}
requests.post('https://straylight.jp/connect/hooks/blescan', data=data)
while p.poll() is None:
try:
line = q.get_nowait()
except Empty:
time.sleep(0.01)
continue
else:
parts = line.split()
mac_address = parts[0]
device_name = parts[1]
if device_name.startswith('SLBeacon'):
post_scan(mac_address, device_name)
|
test_decimal.py
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former test
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
test the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
www2.hursley.ibm.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
from decimal import *
import numbers
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754)
from test.support import check_warnings
import random
try:
import threading
except ImportError:
threading = None
# Useful Test Constant
Signals = tuple(getcontext().flags.keys())
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = (Clamped, Rounded, Inexact, Subnormal,
Underflow, Overflow, DivisionByZero, InvalidOperation)
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
def init():
global ORIGINAL_CONTEXT
ORIGINAL_CONTEXT = getcontext().copy()
DefaultTestContext = Context(
prec = 9,
rounding = ROUND_HALF_EVEN,
traps = dict.fromkeys(Signals, 0)
)
setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# list of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
#Map the test cases' error names to the actual errors
ErrorNames = {'clamped' : Clamped,
'conversion_syntax' : InvalidOperation,
'division_by_zero' : DivisionByZero,
'division_impossible' : InvalidOperation,
'division_undefined' : InvalidOperation,
'inexact' : Inexact,
'invalid_context' : InvalidOperation,
'invalid_operation' : InvalidOperation,
'overflow' : Overflow,
'rounded' : Rounded,
'subnormal' : Subnormal,
'underflow' : Underflow}
def Nonfunction(*args):
"""Doesn't do anything."""
return None
RoundingDict = {'ceiling' : ROUND_CEILING, #Maps test-case names to roundings.
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw
nameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor',
}
# The following functions return True/False rather than a Decimal instance
LOGICAL_FUNCTIONS = (
'is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum',
)
class DecimalTest(unittest.TestCase):
"""Class which tests the Decimal class against the test cases.
Changed for unittest.
"""
def setUp(self):
self.context = Context()
self.ignore_list = ['#']
# Basically, a # means return NaN InvalidOperation.
# Different from a sNaN in trim
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
def eval_file(self, file):
global skip_expected
if skip_expected:
raise unittest.SkipTest
return
with open(file) as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except DecimalException as exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
return
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
def eval_directive(self, s):
funct, value = (x.strip().lower() for x in s.split(':'))
if funct == 'rounding':
value = RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, Nonfunction)
funct(value)
def eval_equation(self, s):
#global DEFAULT_PRECISION
#print DEFAULT_PRECISION
if not TEST_ALL and random.random() < 0.90:
return
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print("Test ", id, end=" ")
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
if id in skipped_test_ids:
return
fname = nameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [ErrorNames[x.lower()] for x in exceptions]
for exception in Signals:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = Decimal(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
# as above, but add traps cumulatively, to check precedence
ordered_errors = [e for e in OrderedSignals if e in theirexceptions]
for error in ordered_errors:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals as e:
self.fail("Raised %s in %s; expected %s" %
(type(e), s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
# reset traps
for error in ordered_errors:
self.context.traps[error] = 0
if DEBUG:
print("--", self.context)
try:
result = str(funct(*vals))
if fname in LOGICAL_FUNCTIONS:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals as error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print("ERROR:", s)
raise
myexceptions = self.getexceptions()
self.context.clear_flags()
myexceptions.sort(key=repr)
theirexceptions.sort(key=repr)
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
return
def getexceptions(self):
return [e for e in Signals if self.context.flags[e]]
def change_precision(self, prec):
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
self.context.Emin = exp
def change_max_exponent(self, exp):
self.context.Emax = exp
def change_clamp(self, clamp):
self.context.clamp = clamp
# The following classes test the behaviour of Decimal according to PEP 327
class DecimalExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
def test_explicit_from_string(self):
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
#engineer notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
def test_explicit_from_tuples(self):
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_bool(self):
self.assertIs(bool(Decimal(0)), False)
self.assertIs(bool(Decimal(1)), True)
self.assertEqual(Decimal(False), Decimal(0))
self.assertEqual(Decimal(True), Decimal(1))
def test_explicit_from_Decimal(self):
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
self.assertNotEqual(id(d), id(e))
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
self.assertNotEqual(id(d), id(e))
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
self.assertNotEqual(id(d), id(e))
@requires_IEEE_754
def test_explicit_from_float(self):
r = Decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertTrue(Decimal(float('nan')).is_qnan())
self.assertTrue(Decimal(float('inf')).is_infinite())
self.assertTrue(Decimal(float('-inf')).is_infinite())
self.assertEqual(str(Decimal(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(Decimal(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(Decimal(float('-inf'))),
str(Decimal('-Infinity')))
self.assertEqual(str(Decimal(float('-0.0'))),
str(Decimal('-0')))
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(Decimal(x))) # roundtrip
def test_explicit_context_create_decimal(self):
nc = copy.copy(getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.assertIsInstance(d, Decimal)
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
def test_unicode_digits(self):
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class DecimalImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + None', globals())
def test_implicit_from_int(self):
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', globals())
def test_implicit_from_float(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', globals())
def test_implicit_from_Decimal(self):
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
# Allow other classes to be trained to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class DecimalFormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# test alignment and padding
('6', '123', ' 123'),
('<6', '123', '123 '),
('>6', '123', ' 123'),
('^6', '123', ' 123 '),
('=+6', '123', '+ 123'),
('#<10', 'NaN', 'NaN#######'),
('#<10', '-4.3', '-4.3######'),
('#<+10', '0.0130', '+0.0130###'),
('#< 10', '0.0130', ' 0.0130###'),
('@>10', '-Inf', '@-Infinity'),
('#>5', '-Inf', '-Infinity'),
('?^5', '123', '?123?'),
('%^6', '123', '%123%%'),
(' ^6', '-45.6', '-45.6 '),
('/=10', '-45.6', '-/////45.6'),
('/=+10', '45.6', '+/////45.6'),
('/= 10', '45.6', ' /////45.6'),
# thousands separator
(',', '1234567', '1,234,567'),
(',', '123456', '123,456'),
(',', '12345', '12,345'),
(',', '1234', '1,234'),
(',', '123', '123'),
(',', '12', '12'),
(',', '1', '1'),
(',', '0', '0'),
(',', '-1234567', '-1,234,567'),
(',', '-123456', '-123,456'),
('7,', '123456', '123,456'),
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
# ... with fractional part...
('07,', '1234.56', '1,234.56'),
('08,', '1234.56', '1,234.56'),
('09,', '1234.56', '01,234.56'),
('010,', '1234.56', '001,234.56'),
('011,', '1234.56', '0,001,234.56'),
('012,', '1234.56', '0,001,234.56'),
('08,.1f', '1234.5', '01,234.5'),
# no thousands separators in fraction part
(',', '1.23456789', '1.23456789'),
(',%', '123.456789', '12,345.6789%'),
(',e', '123456', '1.23456e+5'),
(',E', '123456', '1.23456E+5'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
# Issue 7094: Alternate formatting (specified by #)
('.0e', '1.0', '1e+0'),
('#.0e', '1.0', '1.e+0'),
('.0f', '1.0', '1'),
('#.0f', '1.0', '1.'),
('g', '1.1', '1.1'),
('#g', '1.1', '1.1'),
('.0g', '1', '1'),
('#.0g', '1', '1.'),
('.0%', '1.0', '100%'),
('#.0%', '1.0', '100.%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
def test_n_format(self):
try:
from locale import CHAR_MAX
except ImportError:
return
# Set up some localeconv-like dictionaries
en_US = {
'decimal_point' : '.',
'grouping' : [3, 3, 0],
'thousands_sep': ','
}
fr_FR = {
'decimal_point' : ',',
'grouping' : [CHAR_MAX],
'thousands_sep' : ''
}
ru_RU = {
'decimal_point' : ',',
'grouping' : [3, 3, 0],
'thousands_sep' : ' '
}
crazy = {
'decimal_point' : '&',
'grouping' : [1, 4, 2, CHAR_MAX],
'thousands_sep' : '-'
}
def get_fmt(x, locale, fmt='n'):
return Decimal.__format__(Decimal(x), fmt, _localeconv=locale)
self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')
self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')
self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')
# zero padding
self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')
self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')
self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')
class DecimalArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
# comparisons involving signaling nans signal InvalidOperation
# order comparisons (<, <=, >, >=) involving only quiet nans
# also signal InvalidOperation
# equality comparisons (==, !=) involving only quiet nans
# don't signal, but return False or True respectively.
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
order_ops = operator.lt, operator.le, operator.gt, operator.ge
equality_ops = operator.eq, operator.ne
# results when InvalidOperation is not trapped
for x, y in qnan_pairs + snan_pairs:
for op in order_ops + equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
# repeat the above, but this time trap the InvalidOperation
with localcontext() as ctx:
ctx.traps[InvalidOperation] = 1
for x, y in qnan_pairs:
for op in equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for "
"operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
for x, y in snan_pairs:
for op in equality_ops:
self.assertRaises(InvalidOperation, operator.eq, x, y)
self.assertRaises(InvalidOperation, operator.ne, x, y)
for x, y in qnan_pairs + snan_pairs:
for op in order_ops:
self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
d = Decimal(1).copy_sign(Decimal(-2))
self.assertEqual(Decimal(1).copy_sign(-2), d)
self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
# The following are two functions used to test threading in the next class
def thfunc1(cls):
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.synchro.wait()
test2 = d1/d3
cls.finish1.set()
cls.assertEqual(test1, Decimal('0.3333333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.3333333333333333333333333333'))
return
def thfunc2(cls):
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
test2 = d1/d3
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.3333333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
return
class DecimalUseOfContextTest(unittest.TestCase):
'''Unit tests for Use of Context cases in Decimal.'''
try:
import threading
except ImportError:
threading = None
# Take care executing this test from IDLE, there's an issue in threading
# that hangs IDLE and I couldn't find it
def test_threading(self):
#Test the "threading isolation" of a Context.
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
return
if threading is None:
del test_threading
class DecimalUsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
def test_copy_and_deepcopy_methods(self):
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
def hashit(d):
a = hash(d)
b = d.__hash__()
self.assertEqual(a, b)
return a
#just that it's hashable
hashit(Decimal(23))
hashit(Decimal('Infinity'))
hashit(Decimal('-Infinity'))
hashit(Decimal('nan123'))
hashit(Decimal('-NaN'))
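        # integral values clustered around powers of two, where a mismatch
        # between hash(Decimal) and hash(int) is most likely to show up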
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 61, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
test_values.extend([
Decimal("-1"), # ==> -2
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hashit(value), hashit(int(value)))
        #the same hash as the equivalent int
self.assertEqual(hashit(Decimal(23)), hashit(23))
self.assertRaises(TypeError, hash, Decimal('sNaN'))
self.assertTrue(hashit(Decimal('Inf')))
self.assertTrue(hashit(Decimal('-Inf')))
        # check that the hashes of a Decimal and a float match when they
# represent exactly the same values
test_strings = ['inf', '-Inf', '0.0', '-.0e1',
'34.0', '2.5', '112390.625', '-0.515625']
for s in test_strings:
f = float(s)
d = Decimal(s)
self.assertEqual(hashit(f), hashit(d))
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
c = getcontext()
old_precision = c.prec
x = Decimal("123456789.1")
c.prec = 6
h1 = hashit(x)
c.prec = 10
h2 = hashit(x)
c.prec = 16
h3 = hashit(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = old_precision
def test_min_and_max_methods(self):
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
        #between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
#Test float and int methods.
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
#floor
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 3),
('3.899', 3),
('-2.3', -3),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
]
for d, i in test_pairs:
self.assertEqual(math.floor(Decimal(d)), i)
self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))
#ceiling
test_pairs = [
('123.00', 123),
('3.2', 4),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
]
for d, i in test_pairs:
self.assertEqual(math.ceil(Decimal(d)), i)
self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))
#round, single argument
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('-3.5', -4),
('-2.5', -2),
('-1.5', -2),
('-0.5', 0),
('0.5', 0),
('1.5', 2),
('2.5', 2),
('3.5', 4),
]
for d, i in test_pairs:
self.assertEqual(round(Decimal(d)), i)
self.assertRaises(ValueError, round, Decimal('-NaN'))
self.assertRaises(ValueError, round, Decimal('sNaN'))
self.assertRaises(ValueError, round, Decimal('NaN123'))
self.assertRaises(OverflowError, round, Decimal('Inf'))
self.assertRaises(OverflowError, round, Decimal('-Inf'))
#round, two arguments; this is essentially equivalent
#to quantize, which is already extensively tested
test_triples = [
('123.456', -4, '0E+4'),
('123.456', -3, '0E+3'),
('123.456', -2, '1E+2'),
('123.456', -1, '1.2E+2'),
('123.456', 0, '123'),
('123.456', 1, '123.5'),
('123.456', 2, '123.46'),
('123.456', 3, '123.456'),
('123.456', 4, '123.4560'),
('123.455', 2, '123.46'),
('123.445', 2, '123.44'),
('Inf', 4, 'NaN'),
('-Inf', -23, 'NaN'),
('sNaN314', 3, 'NaN314'),
]
for d, n, r in test_triples:
self.assertEqual(str(round(Decimal(d), n)), r)
def test_eval_round_trip(self):
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
#inf
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
#coefficient in infinity should be ignored
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_immutability_operations(self):
        # Do operations and check that they didn't change the internal objects.
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
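            # Helper: call the named method on d1 (and d2 when useOther is
            # True) via eval and verify that the operands' internal
            # _sign/_int/_exp state is left untouched.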
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
return
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_subclassing(self):
# Different behaviours when subclassing Decimal
class MyDecimal(Decimal):
pass
d1 = MyDecimal(1)
d2 = MyDecimal(2)
d = d1 + d2
self.assertIs(type(d), Decimal)
d = d1.max(d2)
self.assertIs(type(d), Decimal)
def test_implicit_context(self):
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class DecimalPythonAPItests(unittest.TestCase):
def test_abc(self):
self.assertTrue(issubclass(Decimal, numbers.Number))
self.assertFalse(issubclass(Decimal, numbers.Real))
self.assertIsInstance(Decimal(0), numbers.Number)
self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
d = Decimal('-3.141590000')
p = pickle.dumps(d)
e = pickle.loads(p)
self.assertEqual(d, e)
def test_int(self):
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
class MyDecimal(Decimal):
pass
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
def test_create_decimal_from_float(self):
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
class ContextAPItests(unittest.TestCase):
def test_pickle(self):
c = Context()
e = pickle.loads(pickle.dumps(c))
for k in vars(c):
v1 = vars(c)[k]
v2 = vars(e)[k]
self.assertEqual(v1, v2)
def test_equality_with_other_types(self):
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), with the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
# removed.
c = Context(clamp = 0)
self.assertEqual(c.clamp, 0)
with check_warnings(("", DeprecationWarning)):
c._clamp = 1
self.assertEqual(c.clamp, 1)
with check_warnings(("", DeprecationWarning)):
self.assertEqual(c._clamp, 1)
c.clamp = 0
self.assertEqual(c.clamp, 0)
with check_warnings(("", DeprecationWarning)):
self.assertEqual(c._clamp, 0)
def test_abs(self):
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
def test_is_finite(self):
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_power(self):
c = Context()
d = c.power(Decimal(1), Decimal(4), Decimal(2))
self.assertEqual(c.power(1, 4, 2), d)
self.assertEqual(c.power(Decimal(1), 4, 2), d)
self.assertEqual(c.power(1, Decimal(4), 2), d)
self.assertEqual(c.power(1, 4, Decimal(2)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4), 2), d)
self.assertRaises(TypeError, c.power, '1', 4, 2)
self.assertRaises(TypeError, c.power, 1, '4', 2)
self.assertRaises(TypeError, c.power, 1, 4, '2')
def test_plus(self):
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_quantize(self):
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
class WithStatementTest(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
context = Context(prec=9, Emin = -999999999, Emax = 999999999,
rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-1000000009")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
# set flags, before calling operation
context.clear_flags()
for flag in extra_flags:
context._raise_error(flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
expected_flags.sort(key=id)
# flags we actually got
new_flags = [k for k,v in context.flags.items() if v]
new_flags.sort(key=id)
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_main(arith=False, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init()
global TEST_ALL, DEBUG
TEST_ALL = arith or is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = [
DecimalExplicitConstructionTest,
DecimalImplicitConstructionTest,
DecimalArithmeticOperatorsTest,
DecimalFormatTest,
DecimalUseOfContextTest,
DecimalUsabilityTest,
DecimalPythonAPItests,
ContextAPItests,
DecimalTest,
WithStatementTest,
ContextFlags
]
else:
test_classes = [DecimalTest]
    # Dynamically build a custom test definition for each file in the test
    # directory and add the definitions to the DecimalTest class. This
    # procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(DecimalTest, 'test_' + head, tester)
del filename, head, tail, tester
try:
run_unittest(*test_classes)
if todo_tests is None:
import decimal as DecimalModule
run_doctest(DecimalModule, verbose)
finally:
setcontext(ORIGINAL_CONTEXT)
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
|
test_functools.py
|
import abc
import builtins
import collections
import collections.abc
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import threading
import time
import typing
import unittest
import unittest.mock
from weakref import proxy
import contextlib
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = functools
# pypy: was:
# c_functools = support.import_fresh_module('functools', fresh=['_functools'])
# but this creates confusion for pickle because on pypy, _functools is a
# pure python module, whereas on CPython it is C (and so not really
# re-importable)
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
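    # Temporarily install `replacement` under `name` in sys.modules and
    # restore the original module when the with-block exits.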
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
support.gc_collect()
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
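        # A partial wrapping another partial is flattened, so `nested` below
        # should expose the same func, args and keywords as the flat version.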
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
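        # Build partials that refer to themselves (as the target callable, as
        # a positional argument, and as a keyword value) and check that repr()
        # renders each cycle as '...' instead of recursing.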
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(...)' % (name,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
with self.AllowPickle():
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
def test_setstate_subclasses(self):
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
def test_recursive_pickle(self):
with self.AllowPickle():
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
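            # A length-4 sequence that is not a tuple: __setstate__ must
            # reject it with TypeError without mismanaging the references
            # created while unpacking it (issue 6083).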
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_manually_adding_non_string_keyword(self):
p = self.partial(capture)
# Adding a non-string/unicode keyword to partial kwargs
p.keywords[1234] = 'value'
r = repr(p)
self.assertIn('1234', r)
self.assertIn("'value'", r)
with self.assertRaises(TypeError):
p()
def test_keystr_replaces_value(self):
p = self.partial(capture)
class MutatesYourDict(object):
def __str__(self):
p.keywords[self] = ['sth2']
return 'astr'
# Replacing the value during key formatting should keep the original
# value alive (at least long enough).
p.keywords[MutatesYourDict()] = ['sth']
r = repr(p)
self.assertIn('astr', r)
self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
partial = py_functools.partial
class AllowPickle:
def __init__(self):
self._cm = replaced_module("functools", py_functools)
def __enter__(self):
return self._cm.__enter__()
def __exit__(self, type, value, tb):
return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
class PyPartialSubclass(py_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
spec_keywords = functools.partialmethod(capture, self=1, func=2)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.a.spec_keywords(), ((self.a,), {'self': 1, 'func': 2}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod()
with self.assertWarns(DeprecationWarning):
class B:
method = functools.partialmethod(func=capture, a=1)
b = B()
self.assertEqual(b.method(2, x=3), ((b, 2), {'a': 1, 'x': 3}))
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
def test_positional_only(self):
def f(a, b, /):
return a + b
p = functools.partial(f, 1)
self.assertEqual(p(2), f(1, 2))
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertTrue(getattr(wrapper, name) == getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce:
def test_reduce(self):
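        # Squares computes its items lazily via __len__/__getitem__,
        # exercising reduce() over the legacy sequence protocol.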
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.reduce(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.reduce(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.reduce(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.reduce(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.reduce(add, Squares(10)), 285)
self.assertEqual(self.reduce(add, Squares(10), 0), 285)
self.assertEqual(self.reduce(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.reduce)
self.assertRaises(TypeError, self.reduce, 42, 42)
self.assertRaises(TypeError, self.reduce, 42, 42, 42)
self.assertEqual(self.reduce(42, "1"), "1") # func is never called with one item
self.assertEqual(self.reduce(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.reduce, 42, (42, 42))
self.assertRaises(TypeError, self.reduce, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.reduce, add, "")
self.assertRaises(TypeError, self.reduce, add, ())
self.assertRaises(TypeError, self.reduce, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.reduce, add, TestFailingIter())
self.assertEqual(self.reduce(add, [], None), None)
self.assertEqual(self.reduce(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.reduce, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.reduce(add, SequenceClass(5)), 10)
self.assertEqual(self.reduce(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.reduce, add, SequenceClass(0))
self.assertEqual(self.reduce(add, SequenceClass(0), 42), 42)
self.assertEqual(self.reduce(add, SequenceClass(1)), 0)
self.assertEqual(self.reduce(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.reduce(add, d), "".join(d.keys()))
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduceC(TestReduce, unittest.TestCase):
if c_functools:
reduce = c_functools.reduce
class TestReducePy(TestReduce, unittest.TestCase):
reduce = staticmethod(py_functools.reduce)
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.abc.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
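# Illustrative note (not from the original test module): with only __lt__ and
# __eq__ defined, functools.total_ordering fills in the remaining comparisons
# for Orderable_LT above, e.g.:
#   Orderable_LT(1) <= Orderable_LT(2)   # True, via the generated __le__
#   Orderable_LT(2) >= Orderable_LT(1)   # True, via the generated __ge__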
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# * * * *
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_no_args(self):
@self.module.lru_cache
def square(x):
return x ** 2
self.assertEqual(list(map(square, [10, 20, 10])),
[100, 400, 100])
self.assertEqual(square.cache_info().hits, 1)
self.assertEqual(square.cache_info().misses, 2)
self.assertEqual(square.cache_info().maxsize, 128)
self.assertEqual(square.cache_info().currsize, 2)
def test_lru_bug_35780(self):
# C version of the lru_cache was not checking to see if
# the user function call has already modified the cache
# (this arises in recursive calls and in multi-threading).
# This caused the cache to have orphan links not referenced
# by the cache dictionary.
once = True # Modified by f(x) below
@self.module.lru_cache(maxsize=10)
def f(x):
nonlocal once
rv = f'.{x}.'
if x == 20 and once:
once = False
rv = f(x)
return rv
# Fill the cache
for x in range(15):
self.assertEqual(f(x), f'.{x}.')
self.assertEqual(f.cache_info().currsize, 10)
# Make a recursive call and make sure the cache remains full
self.assertEqual(f(20), '.20.')
self.assertEqual(f.cache_info().currsize, 10)
def test_lru_bug_36650(self):
# C version of lru_cache was treating a call with an empty **kwargs
# dictionary as being distinct from a call with no keywords at all.
# This did not result in an incorrect answer, but it did trigger
# an unexpected cache miss.
@self.module.lru_cache()
def f(x):
pass
f(0)
f(0, **{})
self.assertEqual(f.cache_info().hits, 1)
def test_lru_hash_only_once(self):
# To protect against weird reentrancy bugs and to improve
# efficiency when faced with slow __hash__ methods, the
# LRU cache guarantees that it will call __hash__
# only once per use as an argument to the cached function.
@self.module.lru_cache(maxsize=1)
def f(x, y):
return x * 3 + y
# Simulate the integer 5
mock_int = unittest.mock.Mock()
mock_int.__mul__ = unittest.mock.Mock(return_value=15)
mock_int.__hash__ = unittest.mock.Mock(return_value=999)
# Add to cache: One use as an argument gives one call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 1)
self.assertEqual(f.cache_info(), (0, 1, 1, 1))
# Cache hit: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 1, 1, 1))
# Cache eviction: No use as an argument gives no additional call
self.assertEqual(f(6, 2), 20)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 2, 1, 1))
# Cache miss: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 3)
self.assertEqual(f.cache_info(), (1, 3, 1, 1))
def test_lru_reentrancy_with_len(self):
# Test to make sure the LRU cache code isn't thrown off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=0, currsize=0))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with support.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
# XXX: Why can these be unequal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with support.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with support.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with support.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
# Note: in the assert above, neither side is g itself;
# @singledispatch returns the wrapper, not the original function.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections.abc
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
for haystack in permutations(bases):
m = mro(collections.ChainMap, haystack)
self.assertEqual(m, [collections.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(collections.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [collections.defaultdict, dict, c.Sized,
c.Container, object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(collections.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
collections.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(collections.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, collections.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections.abc
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(collections.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections.abc
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(collections.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(collections.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
import weakref
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
td = TracingDict()
with support.swap_attr(weakref, "WeakKeyDictionary", lambda: td):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
def test_annotations(self):
@functools.singledispatch
def i(arg):
return "base"
@i.register
def _(arg: collections.abc.Mapping):
return "mapping"
@i.register
def _(arg: "collections.abc.Sequence"):
return "sequence"
self.assertEqual(i(None), "base")
self.assertEqual(i({"a": 1}), "mapping")
self.assertEqual(i([1, 2, 3]), "sequence")
self.assertEqual(i((1, 2, 3)), "sequence")
self.assertEqual(i("str"), "sequence")
# Registering classes as callables doesn't work with annotations;
# you need to pass the type explicitly.
@i.register(str)
class _:
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return self.arg == other
self.assertEqual(i("str"), "str")
def test_method_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
self.arg = "base"
@t.register(int)
def _(self, arg):
self.arg = "int"
@t.register(str)
def _(self, arg):
self.arg = "str"
a = A()
a.t(0)
self.assertEqual(a.arg, "int")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t('')
self.assertEqual(a.arg, "str")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t(0.0)
self.assertEqual(a.arg, "base")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
def test_staticmethod_register(self):
class A:
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register(int)
@staticmethod
def _(arg):
return isinstance(arg, int)
@t.register(str)
@staticmethod
def _(arg):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_callable_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@A.t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@A.t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_abstractmethod_register(self):
class Abstract(metaclass=abc.ABCMeta):
@functools.singledispatchmethod
@abc.abstractmethod
def add(self, x, y):
pass
self.assertTrue(Abstract.add.__isabstractmethod__)
def test_type_ann_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
return "base"
@t.register
def _(self, arg: int):
return "int"
@t.register
def _(self, arg: str):
return "str"
a = A()
self.assertEqual(a.t(0), "int")
self.assertEqual(a.t(''), "str")
self.assertEqual(a.t(0.0), "base")
def test_invalid_registrations(self):
msg_prefix = "Invalid first argument to `register()`: "
msg_suffix = (
". Use either `@register(some_class)` or plain `@register` on an "
"annotated function."
)
@functools.singledispatch
def i(arg):
return "base"
with self.assertRaises(TypeError) as exc:
@i.register(42)
def _(arg):
return "I annotated with a non-type"
self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg):
return "I forgot to annotate"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg: typing.Iterable[str]):
# At runtime, dispatching on generics is impossible.
# When registering implementations with singledispatch, avoid
# types from `typing`. Instead, annotate with regular types
# or ABCs.
return "I annotated with a generic collection"
self.assertTrue(str(exc.exception).startswith(
"Invalid annotation for 'arg'."
))
self.assertTrue(str(exc.exception).endswith(
'typing.Iterable[str] is not a class.'
))
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
class CachedCostItem:
_cost = 1
def __init__(self):
self.lock = py_functools.RLock()
@py_functools.cached_property
def cost(self):
"""The cost of the item."""
with self.lock:
self._cost += 1
return self._cost
class OptionallyCachedCostItem:
_cost = 1
def get_cost(self):
"""The cost of the item."""
self._cost += 1
return self._cost
cached_cost = py_functools.cached_property(get_cost)
class CachedCostItemWait:
def __init__(self, event):
self._cost = 1
self.lock = py_functools.RLock()
self.event = event
@py_functools.cached_property
def cost(self):
self.event.wait(1)
with self.lock:
self._cost += 1
return self._cost
class CachedCostItemWithSlots:
__slots__ = ('_cost')
def __init__(self):
self._cost = 1
@py_functools.cached_property
def cost(self):
raise RuntimeError('never called, slots not supported')
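# Illustrative note on the mechanism exercised below: cached_property computes
# the value once and stores it in the instance's __dict__ under the attribute
# name, so later lookups bypass the descriptor; classes with __slots__ or an
# immutable __dict__ (like the ones above) therefore cannot cache. Rough sketch:
#   item = CachedCostItem()
#   'cost' in vars(item)   # False before the first access
#   item.cost              # 2 -- runs the getter once and caches the result
#   'cost' in vars(item)   # True afterwards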
class TestCachedProperty(unittest.TestCase):
def test_cached(self):
item = CachedCostItem()
self.assertEqual(item.cost, 2)
self.assertEqual(item.cost, 2) # not 3
def test_cached_attribute_name_differs_from_func_name(self):
item = OptionallyCachedCostItem()
self.assertEqual(item.get_cost(), 2)
self.assertEqual(item.cached_cost, 3)
self.assertEqual(item.get_cost(), 4)
self.assertEqual(item.cached_cost, 3)
def test_threaded(self):
go = threading.Event()
item = CachedCostItemWait(go)
num_threads = 3
orig_si = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
threads = [
threading.Thread(target=lambda: item.cost)
for k in range(num_threads)
]
with support.start_threads(threads):
go.set()
finally:
sys.setswitchinterval(orig_si)
self.assertEqual(item.cost, 2)
def test_object_with_slots(self):
item = CachedCostItemWithSlots()
with self.assertRaisesRegex(
TypeError,
"No '__dict__' attribute on 'CachedCostItemWithSlots' instance to cache 'cost' property.",
):
item.cost
def test_immutable_dict(self):
class MyMeta(type):
@py_functools.cached_property
def prop(self):
return True
class MyClass(metaclass=MyMeta):
pass
with self.assertRaisesRegex(
TypeError,
"The '__dict__' attribute on 'MyMeta' instance does not support item assignment for caching 'prop' property.",
):
MyClass.prop
def test_reuse_different_names(self):
"""Disallow this case because decorated function a would not be cached."""
with self.assertRaises(RuntimeError) as ctx:
class ReusedCachedProperty:
@py_functools.cached_property
def a(self):
pass
b = a
self.assertEqual(
str(ctx.exception.__context__),
str(TypeError("Cannot assign the same cached_property to two different names ('a' and 'b')."))
)
def test_reuse_same_name(self):
"""Reusing a cached_property on different classes under the same name is OK."""
counter = 0
@py_functools.cached_property
def _cp(_self):
nonlocal counter
counter += 1
return counter
class A:
cp = _cp
class B:
cp = _cp
a = A()
b = B()
self.assertEqual(a.cp, 1)
self.assertEqual(b.cp, 2)
self.assertEqual(a.cp, 1)
def test_set_name_not_called(self):
cp = py_functools.cached_property(lambda s: None)
class Foo:
pass
Foo.cp = cp
with self.assertRaisesRegex(
TypeError,
"Cannot use cached_property instance without calling __set_name__ on it.",
):
Foo().cp
def test_access_from_class(self):
self.assertIsInstance(CachedCostItem.cost, py_functools.cached_property)
def test_doc(self):
self.assertEqual(CachedCostItem.cost.__doc__, "The cost of the item.")
if __name__ == '__main__':
unittest.main()
|
lock_test.py
|
# coding:utf-8
from threading import Lock, Thread
lock = Lock()
g = 0
def add_one():
global g
lock.acquire()
g += 1
lock.release()
def add_two():
global g
lock.acquire()
g += 2
lock.release()
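# Illustrative alternative (hypothetical helper, not used by the threads
# below): the acquire()/release() pairs above can be written with the lock as
# a context manager, which also releases the lock if the block raises.
def add_three():
    global g
    with lock:
        g += 3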
threads = []
for func in [add_one, add_two]:
threads.append(Thread(target=func))
threads[-1].start()
for t in threads:
t.join()
print(g)
|
server.py
|
from multiprocessing import Process
from SocketServer import TCPServer, BaseRequestHandler as TCPHandler
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler as HTTPHandler
policy = """<?xml version="1.0"?>
<!DOCTYPE cross-domain-policy SYSTEM "/xml/dtds/cross-domain-policy.dtd">
<cross-domain-policy>
<site-control permitted-cross-domain-policies="master-only"/>
<allow-access-from domain="*" to-ports="61613,5672" />
</cross-domain-policy>
"""
class PolicyHandler(TCPHandler):
def handle(self):
data = self.request.recv(1024).strip()
print 'Got request: ' + data
if '<policy-file-request/>' in data:
self.request.sendall(policy)
def handle_policy():
print 'Serving policy on port 800'
TCPServer(('', 800), PolicyHandler).serve_forever()
def handle_files():
print 'Serving files on port 8000'
HTTPServer(('', 8000), HTTPHandler).serve_forever()
if __name__=='__main__':
Process(target=handle_policy).start()
Process(target=handle_files).start()
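# Note: this script targets Python 2 (print statements and the SocketServer /
# BaseHTTPServer / SimpleHTTPServer module names). A rough, untested sketch of
# the Python 3 equivalents for the imports would be:
#   from socketserver import TCPServer, BaseRequestHandler as TCPHandler
#   from http.server import HTTPServer, SimpleHTTPRequestHandler as HTTPHandler
# and the handler would need to work with bytes, e.g.
#   if b'<policy-file-request/>' in data: self.request.sendall(policy.encode())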
|
surface_stats_collector.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import Queue
import threading
# Log marker containing SurfaceTexture timestamps.
_SURFACE_TEXTURE_TIMESTAMPS_MESSAGE = 'SurfaceTexture update timestamps'
_SURFACE_TEXTURE_TIMESTAMP_RE = r'\d+'
class SurfaceStatsCollector(object):
"""Collects surface stats for a SurfaceView from the output of SurfaceFlinger.
Args:
device: A DeviceUtils instance.
"""
def __init__(self, device):
self._device = device
self._collector_thread = None
self._surface_before = None
self._get_data_event = None
self._data_queue = None
self._stop_event = None
self._warn_about_empty_data = True
def DisableWarningAboutEmptyData(self):
self._warn_about_empty_data = False
def Start(self):
assert not self._collector_thread
if self._ClearSurfaceFlingerLatencyData():
self._get_data_event = threading.Event()
self._stop_event = threading.Event()
self._data_queue = Queue.Queue()
self._collector_thread = threading.Thread(target=self._CollectorThread)
self._collector_thread.start()
else:
raise Exception('SurfaceFlinger not supported on this device.')
def Stop(self):
assert self._collector_thread
(refresh_period, timestamps) = self._GetDataFromThread()
if self._collector_thread:
self._stop_event.set()
self._collector_thread.join()
self._collector_thread = None
return (refresh_period, timestamps)
def _CollectorThread(self):
last_timestamp = 0
timestamps = []
retries = 0
while not self._stop_event.is_set():
self._get_data_event.wait(1)
try:
refresh_period, new_timestamps = self._GetSurfaceFlingerFrameData()
if refresh_period is None or new_timestamps is None:
retries += 1
if retries < 3:
continue
if last_timestamp:
# Some data has already been collected, but either the app
# was closed or there's no new data. Signal the main thread and
# wait.
self._data_queue.put((None, None))
self._stop_event.wait()
break
raise Exception('Unable to get surface flinger latency data')
timestamps += [timestamp for timestamp in new_timestamps
if timestamp > last_timestamp]
if len(timestamps):
last_timestamp = timestamps[-1]
if self._get_data_event.is_set():
self._get_data_event.clear()
self._data_queue.put((refresh_period, timestamps))
timestamps = []
except Exception as e:
# On any error, before aborting, put the exception into _data_queue to
# prevent the main thread from waiting at _data_queue.get() indefinitely.
self._data_queue.put(e)
raise
def _GetDataFromThread(self):
self._get_data_event.set()
ret = self._data_queue.get()
if isinstance(ret, Exception):
raise ret
return ret
def _ClearSurfaceFlingerLatencyData(self):
"""Clears the SurfaceFlinger latency data.
Returns:
True if SurfaceFlinger latency is supported by the device, otherwise
False.
"""
# The command returns nothing if it is supported, otherwise returns many
# lines of result just like 'dumpsys SurfaceFlinger'.
results = self._device.RunShellCommand(
'dumpsys SurfaceFlinger --latency-clear SurfaceView')
return not len(results)
def GetSurfaceFlingerPid(self):
results = self._device.RunShellCommand('ps | grep surfaceflinger')
if not results:
raise Exception('Unable to get surface flinger process id')
pid = results[0].split()[1]
return pid
def _GetSurfaceFlingerFrameData(self):
"""Returns collected SurfaceFlinger frame timing data.
Returns:
A tuple containing:
- The display's nominal refresh period in milliseconds.
- A list of timestamps signifying frame presentation times in
milliseconds.
The return value may be (None, None) if there was no data collected (for
example, if the app was closed before the collector thread has finished).
"""
# adb shell dumpsys SurfaceFlinger --latency <window name>
# prints some information about the last 128 frames displayed in
# that window.
# The data returned looks like this:
# 16954612
# 7657467895508 7657482691352 7657493499756
# 7657484466553 7657499645964 7657511077881
# 7657500793457 7657516600576 7657527404785
# (...)
#
# The first line is the refresh period (here 16.95 ms); it is followed
# by 128 lines with 3 timestamps in nanoseconds each:
# A) when the app started to draw
# B) the vsync immediately preceding SF submitting the frame to the h/w
# C) timestamp immediately after SF submitted that frame to the h/w
#
# The difference between the 1st and 3rd timestamp is the frame-latency.
# An interesting data point is when the frame latency crosses a refresh-period
# boundary; this can be calculated this way:
#
# ceil((C - A) / refresh-period)
#
# (each time the number above changes, we have a "jank").
# If this happens a lot during an animation, the animation appears
# janky, even if it runs at 60 fps on average.
#
# We use the special "SurfaceView" window name because the statistics for
# the activity's main window are not updated when the main web content is
# composited into a SurfaceView.
results = self._device.RunShellCommand(
'dumpsys SurfaceFlinger --latency SurfaceView')
if not len(results):
return (None, None)
timestamps = []
nanoseconds_per_millisecond = 1e6
refresh_period = long(results[0]) / nanoseconds_per_millisecond
# If a fence associated with a frame is still pending when we query the
# latency data, SurfaceFlinger gives the frame a timestamp of INT64_MAX.
# Since we only care about completed frames, we will ignore any timestamps
# with this value.
pending_fence_timestamp = (1 << 63) - 1
for line in results[1:]:
fields = line.split()
if len(fields) != 3:
continue
timestamp = long(fields[1])
if timestamp == pending_fence_timestamp:
continue
timestamp /= nanoseconds_per_millisecond
timestamps.append(timestamp)
return (refresh_period, timestamps)
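# Illustrative sketch (a hypothetical helper, not used by the class above):
# given the raw (A, B, C) timestamp triples described in the comment inside
# _GetSurfaceFlingerFrameData(), count "janks", i.e. frames whose
# ceil((C - A) / refresh_period) differs from the previous frame's value.
# Assumes the triples and refresh_period are expressed in the same time unit.
def CountJanks(frame_triples, refresh_period):
  import math
  janks = 0
  previous_periods = None
  for draw_start, _vsync, submit_time in frame_triples:
    periods = int(math.ceil((submit_time - draw_start) / float(refresh_period)))
    if previous_periods is not None and periods != previous_periods:
      janks += 1
    previous_periods = periods
  return janks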
|
realtime_resampler_ext.py
|
#
from QAPUBSUB.consumer import subscriber, subscriber_routing
from QAPUBSUB.producer import publisher
from QUANTAXIS.QAEngine.QAThreadEngine import QA_Thread
from QUANTAXIS.QAData.data_resample import QA_data_futuremin_resample, QA_data_futuremin_resample_tb_kq
from QUANTAXIS.QAUtil.QADate_trade import QA_util_future_to_tradedatetime
from QARealtimeCollector.setting import eventmq_ip
import json
import pandas as pd
import numpy as np
import threading
import time
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, pd.Timestamp):
return str(obj)
else:
return super(NpEncoder, self).default(obj)
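# Minimal usage sketch for NpEncoder: numpy scalars/arrays and pandas
# Timestamps are not JSON-serializable by default, so the encoder converts
# them to plain Python types first, e.g.:
#   json.dumps({'x': np.int64(3), 'y': np.array([1.5, 2.5])}, cls=NpEncoder)
#   -> '{"x": 3, "y": [1.5, 2.5]}'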
class QARTC_Resampler_Ext(QA_Thread):
def __init__(self, block_id='0', freqence='1min', model='tb'):
super().__init__()
self.block_id = block_id
self.freqence = freqence
# self.sub = subscriber(
# host=eventmq_ip, exchange='realtime_block_{}'.format(self.block_id))
self.sub = subscriber_routing(
host=eventmq_ip, exchange='realtime_block_{}'.format(self.block_id), routing_key = self.block_id)
self.pub = publisher(
host=eventmq_ip, exchange='realtime_block_{}_{}'.format(self.block_id, self.freqence))
self.sub.callback = self.callback
self.market_data = {}
self.dt = {}
self.model = model
threading.Thread(target=self.sub.start).start()
def callback(self, a, b, c, data):
load_datas = json.loads(str(data, encoding='utf-8'))
# print(load_datas)
for lastest_data in load_datas:
code = lastest_data['code']
# print("code=%s" % code)
# print(self.market_data.keys())
if code not in self.market_data.keys():
self.market_data[code] = []
self.dt[code] = None
# print(lastest_data)
# print(lastest_data['servertime'])
if self.dt[code] != lastest_data['servertime'] or len(self.market_data[code]) < 1:
self.dt[code] = lastest_data['servertime']
# print('new')
self.market_data[code].append(lastest_data)
else:
# print('update')
self.market_data[code][-1] = lastest_data
df = pd.DataFrame(self.market_data[code])
df = df.assign(datetime=pd.to_datetime(df.servertime), code=code, position=0,
tradetime=df.datetime.apply(QA_util_future_to_tradedatetime)).set_index('datetime')
print(df)
# if self.model == 'tb':
# res = QA_data_futuremin_resample_tb_kq(df, self.freqence)
# else:
# res = QA_data_futuremin_resample(df, self.freqence)
# # print(res)
# # print(res.iloc[-1].to_dict())
# self.pub.pub(json.dumps(
# res.reset_index().iloc[-1].to_dict(), cls=NpEncoder))
def run(self):
while True:
# print(pd.DataFrame(self.data))
time.sleep(1)
if __name__ == "__main__":
QARTC_Resampler_Ext().start()
|
external_api.py
|
import json
from urllib.request import urlopen, Request
from urllib import parse
from time import time
from datetime import datetime
import threading
# Debug
from pprint import pprint as pp
class EXTERNAL_API(object):
_LastLoginDate = 0
    _LastLoginThreshold = 60*60*6  # 6h
# Placeholders
_token = None
_totalProducts = None
_URLs = {
'login': 'https://www.elektrix.com/rest1/auth/login/elektrixapi',
'getProducts': 'https://www.elektrix.com/rest1/product/getProducts',
'getProductTotal': 'https://www.elektrix.com/rest1/product/getProductTotal',
'setProducts': 'https://www.elektrix.com/rest1/product/setProducts',
'getOrders': 'https://www.elektrix.com/rest1/order2/getOrders',
'getCustomers': 'https://www.elektrix.com/rest1/customer/getCustomers',
'getInvoices': ''
}
def __init__(self, password: str):
self._pass = password
def __makeRequest(self, url:str, data:dict) -> dict:
try:
req = Request(url, data=data)
resp = urlopen(req)
j = json.load(resp)
if j['success'] == True:
return j
else:
print(j['message'][0]['text'])
return None
except:
#Future -> Must Debug Here
return None
def __login(self):
url = self._URLs['login']
data = parse.urlencode({
"pass": self._pass,
"language": "en"
}).encode()
j = self.__makeRequest(url, data)
if j is not None:
self._token = j['data'][0]['token']
self._LastLoginDate = time()
print("Logged In")
else:
raise Exception("Login Error ...")
def __checkLogin(self):
        if time() - self._LastLoginDate > self._LastLoginThreshold:
            # print('Above Threshold')
self.__login()
#! GetColumnNames ----------------------------------------------------------
def getColumnNames(self, type:str) ->[str] :
self.__checkLogin()
if type == 'product':
url = self._URLs['getProducts']
elif type == 'customer':
url = self._URLs['getCustomers']
elif type == 'orders':
url = self._URLs['getOrders']
elif type == 'invoice':
url = self._URLs['getInvoices']
else:
raise Exception("Invalid Type Entered")
opts = {
"token": self._token,
"limit": 1
}
data = parse.urlencode(opts).encode()
j = self.__makeRequest(url, data)
if j is not None:
a = j['data'][0]
return list(a.keys())
else:
raise Exception("Connection Error")
#! Products ----------------------------------------------------------
def __getTotalProducts(self):
self.__checkLogin()
url = self._URLs['getProductTotal']
data = parse.urlencode({
"token": self._token
}).encode()
j = self.__makeRequest(url, data)
if j is not None:
self._totalProducts = j['summary']['totalRecordCount']
else:
raise Exception("Could not get Total Product Count")
def fetchProducts(self, filter:str="", columns:[str]="", onlyActives:bool=False, limit:int=500, custom:dict=None) -> [dict]:
self.__checkLogin()
self.__getTotalProducts()
url = self._URLs['getProducts']
pagination = (float(self._totalProducts) / limit) + 1
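        # Worked example (illustrative numbers): with _totalProducts == 1234 and limit == 500,
        # pagination == 3.468 and int(pagination) == 3, so fetch() runs for pages 0, 1 and 2
        # with start offsets 0, 500 and 1000, covering all 1234 records.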
self._FetchedProducts = []
page = 0
def fetch(page):
opts = {
"token": self._token,
"start": page * limit,
"limit": limit
}
if custom is not None:
opts = {**opts, **custom}
if filter != "":
opts['f'] = filter
if columns != "":
opts['columns'] = ','.join(columns)
data = parse.urlencode(opts).encode()
j = self.__makeRequest(url, data)
if j is not None:
try:
data = j['data']
if len(data) > 0:
for prod in data:
if onlyActives:
if prod['IsActive'] == 'true':
self._FetchedProducts.append(prod)
else:
self._FetchedProducts.append(prod)
except:
pass
else:
pass
# raise Exception("Product Fetch Error!")
print(f'{(page + 1) * limit} products out of {self._totalProducts}...')
return
threads = []
for _ in range(int(pagination)):
# print(f'Fetching {page + 1} page out of {int(pagination)}')
x = threading.Thread(target=fetch, args=(page,))
threads.append(x)
page += 1
try:
x.start()
except:
pass
for _, thread in enumerate(threads):
thread.join()
# print(f'{i+1} thread Done')
print("Fetched All Products, in Total: ", len(self._FetchedProducts))
return self._FetchedProducts
def setProducts(self, modifiedData:[dict]):
self.__checkLogin()
url = self._URLs['setProducts']
data = parse.urlencode({
"token": self._token,
"data": "[" + json.dumps(modifiedData) + "]"
}).encode()
j = self.__makeRequest(url, data)
if j is not None:
print(f"Successfully Uploaded {len(modifiedData)} !")
else:
raise Exception("Could not Upload the modifiedData")
#! Orders ----------------------------------------------------------
def __getTotalOrders(self):
self.__checkLogin()
url = self._URLs['getOrders']
data = parse.urlencode({
"token": self._token
}).encode()
j = self.__makeRequest(url, data)
if j is not None:
self._totalOrders = j['summary']['totalRecordCount']
else:
raise Exception("Could not Get Order Info")
def fetchOrders(self, filter:str="", columns:[str]="", limit:int=500, custom:dict=None) -> [dict]:
self.__checkLogin()
self.__getTotalOrders()
url = self._URLs['getOrders']
pagination = (float(self._totalOrders) / limit) + 1
self._FetchedOrders = []
page = 0
def fetch(page):
opts = {
"token": self._token,
"start": page * limit,
"limit": limit
}
if custom is not None:
opts = {**opts, **custom}
if filter != "":
opts['f'] = filter
if columns != "":
opts['columns'] = ','.join(columns)
data = parse.urlencode(opts).encode()
j = self.__makeRequest(url, data)
if j is not None:
try:
data = j['data']
if len(data) > 0:
for order in data:
self._FetchedOrders.append(order)
except:
pass
else:
raise Exception("Order Fetch Error!")
print(f'{(page + 1) * limit} Orders out of {self._totalOrders}...')
return
threads = []
for _ in range(int(pagination)):
# print(f'Fetching {page + 1} page out of {int(pagination)}')
x = threading.Thread(target=fetch, args=(page,))
threads.append(x)
page += 1
x.start()
for _, thread in enumerate(threads):
thread.join()
# print(f'{i+1} thread Done')
print("Fetched All Orders, in Total: ", len(self._FetchedOrders))
return self._FetchedOrders
#! Customers ----------------------------------------------------------
def __getTotalCustomers(self):
self.__checkLogin()
url = self._URLs['getCustomers']
data = parse.urlencode({
"token": self._token
}).encode()
j = self.__makeRequest(url, data)
if j is not None:
self._totalCustomers = j['summary']['totalRecordCount']
else:
raise Exception("Could not Get Order Info")
def fetchCustomers(self, filter:str="", columns:[str]="", limit:int=500, custom:dict=None) -> [dict]:
self.__checkLogin()
self.__getTotalCustomers()
url = self._URLs['getCustomers']
pagination = (float(self._totalCustomers) / limit) + 1
self._FetchedCustomers = []
page = 0
def fetch(page):
opts = {
"token": self._token,
"start": page * limit,
"limit": limit
}
if custom is not None:
opts = {**opts, **custom}
if filter != "":
opts['f'] = filter
if columns != "":
opts['columns'] = ','.join(columns)
data = parse.urlencode(opts).encode()
j = self.__makeRequest(url, data)
if j is not None:
try:
data = j['data']
if len(data) > 0:
for order in data:
self._FetchedCustomers.append(order)
except:
pass
else:
raise Exception("Order Fetch Error!")
print(f'{(page + 1) * limit} Customers out of {self._totalCustomers}...')
return
threads = []
for _ in range(int(pagination)):
# print(f'Fetching {page + 1} page out of {int(pagination)}')
x = threading.Thread(target=fetch, args=(page,))
threads.append(x)
page += 1
x.start()
for _, thread in enumerate(threads):
thread.join()
# print(f'{i+1} thread Done')
print("Fetched All Customers, in Total: ", len(self._FetchedCustomers))
return self._FetchedCustomers
|
server.py
|
# Python 3.6.2
import os
import sys
import random
import socket
import struct
import datetime
import threading
from hashlib import sha3_224
# Switch to the directory containing server.py
this_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(this_dir)
# Change to project root
os.chdir("../../")
# Add project root (parent package) to path
sys.path.append("./")
# Imports from PATH
from src.inter.modules import primitives
# Globals
localhost = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
localhost.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Nobody likes TIME_WAIT-ing. Add SO_REUSEADDR.
ring_prop = "eeeeeeeeeeeeeeee"
no_prop = "ffffffffffffffff" # a message with a true hash indicates that no message propagation is needed.
log_level = "" # "Debug", "Info", or "Warning"; will be set by self.initialize()
sub_node = "Server"
nodeState = [(), [], False, False, False, [], False]
nodestate_lock = threading.Lock()
send_lock = threading.Lock()
receive_lock = threading.Lock()
try:
# This works when manually executing init_server.py from the current directory
os.chdir(this_dir)
except FileNotFoundError:
# This works when launching with the src/misc/init.py script
os.chdir("../../server")
# This will be reset with input values by init()
Primitives = primitives.Primitives(sub_node, log_level)
original_path = os.path.dirname(os.path.realpath(__file__))
class Server:
@staticmethod
def lock(lock, name=None):
# if name and type(name) == str:
# print("locking " + name)
lock.acquire()
@staticmethod
def release(lock, name=None):
# if name and type(name) == str:
# print("releasing " + name)
lock.release()
def write_nodestate(self, in_nodestate, index, value):
global nodeState
global nodestate_lock
self.lock(nodestate_lock, name="nodeState")
in_nodestate[index] = value
nodeState = list(in_nodestate)
self.release(nodestate_lock, name="Nodestate")
def read_nodestate(self, index):
global nodestate_lock
global nodeState
# Don't read nodeState while some other thread is writing to it!
self.lock(nodestate_lock, name="nodeState")
current_nodestate = list(nodeState)
self.release(nodestate_lock, name="Nodestate")
return current_nodestate[index]
@staticmethod
def prepare(message):
""" Assign unique hashes to messages ready for transport.
Returns (new hashed message) -> str """
out = ""
timestamp = str(datetime.datetime.utcnow())
out += timestamp
out += message
sig = sha3_224(out.encode()).hexdigest()[:16]
out = ""
out += sig
out += ":"
out += message
return out
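    # Hedged example of prepare()'s wire format (the 16-character signature below is
    # illustrative; the real value is sha3_224(timestamp + message).hexdigest()[:16]):
    #   prepare("echo")  ->  "3fb9c2a1b0d4e5f6:echo"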
def permute_network_tuple(self):
""" Permute the network tuple. Repetitive permutation after each call
of respond() functionally allows the network to inherit many of the anonymous
aspects of a mixing network. Packets are sent sequentially in the order of the
        network tuple, which, when permuted, thwarts many timing attacks.
Doesn't return """
net_tuple = self.read_nodestate(0)
cs_prng = random.SystemRandom()
network_list = list(net_tuple)
cs_prng.shuffle(network_list)
new_network_tuple = tuple(network_list)
self.write_nodestate(nodeState, 0, new_network_tuple)
def lookup_socket(self, address): # TODO: optimize me
"""Do a brute force search for a specific socket.
Maybe this can be optimized by caching the indexes of commonly-used connections?"""
net_tuple = self.read_nodestate(0)
for item in net_tuple:
discovered_address = item[1]
if address == discovered_address:
return item[0]
def lookup_address(self, in_sock): # TODO: optimize me
"""Do a brute force search for a specific socket.
Maybe this can be optimized by caching the indexes of commonly-used connections?"""
net_tuple = self.read_nodestate(0)
for item in net_tuple:
discovered_socket = item[0]
if in_sock == discovered_socket:
return item[1]
""" server.send() and primitives.receive() and primitives.receiveall() were written by StackOverflow user
Adam Rosenfield and modified by me, HexicPyth.
https://stackoverflow.com/a/17668009
https://stackoverflow.com/users/9530/adam-rosenfield """
def send(self, connection, message, signing=True):
global send_lock
sock = connection[0]
address = connection[1]
if signing:
msg = self.prepare(message).encode('utf-8')
else:
msg = message.encode('utf-8')
# Prefix each message with a 4-byte length (network byte order). Message lengths must be less than (2^32)-4.
msg = struct.pack('>I', len(msg)) + msg
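        # A minimal sketch of the matching receive side (the real logic lives in
        # primitives.receive, which is not shown here), assuming a blocking socket and
        # ignoring short reads for brevity:
        #   (msg_len,) = struct.unpack('>I', sock.recv(4))
        #   payload = sock.recv(msg_len)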
self.lock(send_lock, name="Send lock")
try:
sock.sendall(msg)
except (BrokenPipeError, OSError, AttributeError):
if address != () and address != "127.0.0.1":
log_msg = str("Errors occurred sending to " + address + "; Disconnecting...")
Primitives.log(log_msg, in_log_level="Warning")
self.disconnect(connection)
self.release(send_lock, name="Send lock")
def broadcast(self, message, do_mesh_propagation=True):
global ring_prop
        # do_mesh_propagation == "not set" means fall back to the global bootstrap flag in nodeState[6]
self.permute_network_tuple()
net_tuple = self.read_nodestate(0)
# If not bootstrapped, do ring network propagation. Else, do fully-complete style propagation.
message_list = self.read_nodestate(1)
if do_mesh_propagation == "not set":
do_mesh_propagation = self.read_nodestate(6)
if not do_mesh_propagation:
# Network not bootstrapped yet, do ring network propagation
if message[:16] != ring_prop:
message = ring_prop + ":" + message
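                # Hedged example of the resulting ring-mode frame (hash value illustrative):
                #   "eeeeeeeeeeeeeeee:3fb9c2a1b0d4e5f6:echo"
                # i.e. ring_prop + ":" + (signature + ":" + payload) as produced by prepare().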
self.write_nodestate(nodeState, 1, message_list)
if do_mesh_propagation:
""" network bootstrapped or do_mesh_propagation override is active, do fully-complete/mesh style
message propagation """
Primitives.log("Message propagation mode: fully-complete/mesh", in_log_level="Debug")
else:
Primitives.log("Message propagation mode: ring", in_log_level="Debug")
for connection in net_tuple:
# Deadlock here
print("Sending "+message + "to " + str(connection))
self.send(connection, message, signing=False) # Send a message to each node( = Broadcast)
def append(self, in_socket, address):
""" Add a connection to the network tuple. Doesn't return."""
net_tuple = self.read_nodestate(0)
# Tuples are immutable; convert it to a list.
network_list = list(net_tuple)
connection = (in_socket, address)
network_list.append(connection)
# (Again) tuples are immutable; replace the old one with the new one
self.write_nodestate(nodeState, 0, tuple(network_list))
def remove(self, connection):
"""Remove a connection from the network tuple. Doesn't return"""
net_tuple = self.read_nodestate(0)
# Tuples are immutable; convert it to a list.
network_list = list(net_tuple)
# Identify and remove said connection
try:
index = network_list.index(connection)
network_list.pop(index)
# Connection not in network tuple, or socket is [closed]
except ValueError:
log_msg = str("Not removing non-existent connection: "+str(connection))
Primitives.log(log_msg, in_log_level="Warning")
# Update the network tuple with the new one
self.write_nodestate(nodeState, 0, tuple(network_list))
def stop(self):
""" Attempt to gracefully disconnect and terminate,
but resort to brute force if needed. """
net_tuple = self.read_nodestate(0)
# 1. Kill localhost client
try:
localhost_socket = self.lookup_socket("127.0.0.1")
localhost_connection = (localhost_socket, "127.0.0.1")
self.send(localhost_connection, "stop")
except ConnectionRefusedError:
pass # Localhost is already disconnected
log_msg = "Attempting to gracefully disconnect and disassociate from all clients..."
Primitives.log(log_msg, in_log_level="Info")
# 2. Disconnect from all clients
for connection in net_tuple:
log_msg = str("Trying to disconnect from socket: " + str(connection[0]))
Primitives.log(log_msg, in_log_level="Debug")
try:
self.disconnect(connection, disallow_local_disconnect=True)
except OSError:
another_log_msg = str("Failed to disconnect from socket: "+str(connection[0]))
Primitives.log(another_log_msg, in_log_level="Warning")
finally:
Primitives.log("Successfully disconnected", in_log_level="Debug")
# Forcefully close localhost socket
localhost_sock_name = localhost.getsockname()
localhost.close()
Primitives.log("Exiting gracefully;", in_log_level="Info")
# 3. Kill the network injector and terminate the Server.
self.write_nodestate(nodeState, 2, True) # set terminated=True
self.write_nodestate(nodeState, 4, True) # set injector_terminated = True
# Hack the socket.listen() loop in the init() function by connecting to it(localhost),
# which will force it to terminate.
temp = socket.socket()
temp.connect(localhost_sock_name) # This will kill the localhost socket
temp.close()
# noinspection PyProtectedMember
os._exit(0)
def respond(self, msg, connection):
# We received a message, reply with an appropriate response.
# Doesn't return anything.
global no_prop # default: 0xffffffffffffffff
net_tuple = self.read_nodestate(0)
message_list = self.read_nodestate(1)
do_ring_prop = False # If true, bypass message signature check
try:
address = connection[1]
full_message = str(msg)
sig = msg[:16]
message = msg[17:]
except TypeError:
""" sig=msg[:16] probably threw TypeError because msg=self.receive(conn) and self.receive probably
returned 0 because the connection is broken. Disconnect from [connection]...
If [connection] is localhost then the client is already dead (hence the receive error);
Permit localhost disconnect..."""
self.disconnect(connection, disallow_local_disconnect=False)
return
if sig == ring_prop:
message = full_message[17:] # Remove the ring-propagation deliminator
message_sig = message[:16] # Signature after removing ring_prop
sig = message_sig
message = message[17:] # remove the signature
new_message_list = list(message_list)
new_message_list.append(message_sig)
self.write_nodestate(nodeState, 1, new_message_list)
# Now, forward message to localhost as no_prop in case
# message propagation path didn't include this node's client
# (This behavior is technically permitted in ring mode)
localhost_message = no_prop + message
localhost_connection = (localhost, "127.0.0.1")
self.send(localhost_connection, localhost_message, signing=False)
# Server received a unique message. Respond accordingly.
if sig not in message_list:
message_received_log_info = str('Server -> Received: ' + message + " (" + sig + ")")
Primitives.log(message_received_log_info, in_log_level="Info")
if message == "echo":
# If received, two-way communication is functional
echo_received_log = str("Two-Way communication with " + address +
" established and/or tested functional")
Primitives.log(echo_received_log, in_log_level="Info")
if message == "stop":
self.broadcast(no_prop + ":" + message)
Primitives.log("Exiting Cleanly", in_log_level="Info")
self.write_nodestate(nodeState, 3, True) # set terminated = True
self.stop()
if message.startswith("remove:"):
address_to_remove = message[7:]
try:
# Don't disconnect from localhost. That's what self.terminate is for.
if address_to_remove != Primitives.get_local_ip() and address_to_remove != "127.0.0.1":
sock = self.lookup_socket(address_to_remove)
if sock:
Primitives.log("Remove -> Disconnecting from " + address_to_remove,
in_log_level="Info")
# lookup the socket of the address we want to remove
connection_to_remove = (sock, address_to_remove)
Primitives.log(str("\t--who's connection is: " + str(connection_to_remove)),
in_log_level="Info")
self.disconnect(connection_to_remove)
else:
Primitives.log("Not disconnecting from a non-existent connection",
in_log_level="Warning")
else:
Primitives.log("Not disconnecting from localhost, dimwit.", in_log_level="Warning")
except (ValueError, TypeError):
                    # Either the address we're looking for doesn't exist, or we're not connected to it.
Primitives.log(str("Sorry, we're not connected to " + address_to_remove),
in_log_level="Warning")
pass
if message.startswith("retrieve:"):
"""
Opposite of write_page() function. This isn't a function because we need access to
the network to propagate the file contents. Typically sent by a network injector and
received from a client, not from a client directly.
e.x retrieve:(64-bit hash)
"""
target_page = message[9:]
address_list = []
for net_socket, net_address in net_tuple:
address_list.append(net_address)
id_list = []
for remote_address in address_list:
identity = sha3_224(remote_address.encode()).hexdigest()[:16]
id_list.append(identity)
# For every address, sequentially send 'fetch' flags to sync any changes
# to the page
fetch_msg = self.prepare("fetch:"+target_page)
self.broadcast(fetch_msg)
if message.startswith("bootstrap:"):
bootstrapped = self.read_nodestate(6)
if not bootstrapped:
bootstrapped = True
self.write_nodestate(nodeState, 6, bootstrapped)
# We only broadcast messages with hashes we haven't already documented. That way the network doesn't
# loop indefinitely broadcasting the same message. Also, Don't append no_prop to message_list.
# That would be bad.
if sig not in message_list and sig != no_prop:
message_list.append(sig) # Append signature to respond()'s local message_list object
self.write_nodestate(nodeState, 1, message_list) # Update it globally
broadcast_notice = str("Broadcasting "+full_message)
Primitives.log(broadcast_notice, in_log_level="Info")
self.broadcast(full_message)
Primitives.log("Permuting the Network Tuple", in_log_level="Info")
self.permute_network_tuple()
if sig == no_prop:
if message[:5] == "sync:":
# This was received with the no_prop flag, however, the Server can't do anything with sync: calls.
# Send this to localhost Client.
Primitives.log("Violating the no_prop policy for localhost", in_log_level="Warning")
localhost_address = "127.0.0.1"
localhost_socket = self.lookup_socket(localhost_address)
localhost_connection = (localhost_socket, localhost_address)
self.send(localhost_connection, full_message, signing=False)
elif message.startswith("sharepeers:"):
localhost_address = "127.0.0.1"
localhost_socket = self.lookup_socket(localhost_address)
localhost_connection = (localhost_socket, localhost_address)
self.send(localhost_connection, full_message, signing=False)
# If message propagation allows, forward all received message to client as no_prop.
if sig != no_prop and sig != ring_prop:
message_to_client = no_prop+message
localhost_connection = (localhost, "127.0.0.1")
self.send(localhost_connection, message_to_client)
def disconnect(self, connection, disallow_local_disconnect=True):
"""Try to disconnect from a socket as cleanly as possible.
Doesn't return anything. """
sock = connection[0]
address = connection[1]
terminated = self.read_nodestate(2)
try:
if disallow_local_disconnect:
Primitives.log("Terminated:"+str(terminated), in_log_level="Debug")
if address == Primitives.get_local_ip() and not terminated:
Primitives.log("(Bug) Refusing to disconnect from localhost;"
"that's a terrible idea...", in_log_level="Warning")
return None
else:
Primitives.log("\n\tSelf.disconnect() called.\n", in_log_level="Info")
verbose_connection_msg = str("Disconnecting from " + address
+ "\n\t( " + str(sock) + " )")
Primitives.log(verbose_connection_msg, in_log_level="Info")
conn_remove_msg = str("Server -> Removing " + str(sock) + " from network tuple")
Primitives.log(conn_remove_msg, in_log_level="Info")
self.remove(connection)
sock.close()
Primitives.log("Successfully Disconnected.", in_log_level="Info")
# Socket not in network tuple . Probably already disconnected, or the socket was [closed]
except IndexError:
Primitives.log("Already disconnected from that address; passing;", in_log_level="Warning")
def listen(self, connection):
"""Listen for incoming messages in one thread, manage the network injector in another.
Doesn't return anything. """
global receive_lock
def listener(conn):
# some variables are defined in the parent function listen(), so the names are _changed
# _to _avoid _name _collision
_terminated = self.read_nodestate(2)
listener_terminated = False # When set, this thread and this thread only, is stopped.
while not (_terminated or listener_terminated):
try:
terminated = self.read_nodestate(2)
incoming = Primitives.receive(conn)
if type(incoming) == int:
self.remove(connection)
listener_terminated = True
else:
self.respond(incoming, conn)
except (IsADirectoryError, EnvironmentError): # DEBUG, OSError, TypeError
# OSError - Something terrible happened trying to receive from a node
# TypeError - A socket is apparently NoneType now. That's bad
try:
client = conn[0]
address = conn[1]
print("TERMINATED: "+str(_terminated))
                        if (address == Primitives.get_local_ip() or address == "127.0.0.1") and not _terminated:
Primitives.log("Something happened to localhost; not disconnecting",
in_log_level="Warning")
print("TERMINATED: "+str(_terminated))
else:
try:
self.disconnect(conn)
except ValueError:
Primitives.log("Socket Closed", in_log_level="Warning")
finally:
connection_down_msg = str("Server -> Connection to " + str(client)
+ "probably down or terminated;")
Primitives.log(connection_down_msg, in_log_level="Warning")
# Don't leave zombie listeners running
listener_terminated = True
if _terminated:
os._exit(0)
except OSError:
pass
except ValueError: # socket is [closed]
listener_terminated = True
def start_injector():
# Start one instance of the network injector and run it until another client connects.
# Note: The injector itself (i.e inject.py) returns any address that throws a BrokenPipeError on broadcast.
# This function returns nothing.
os.chdir(original_path)
import inject
injector = inject.NetworkInjector()
net_tuple = self.read_nodestate(0)
terminated = self.read_nodestate(2)
net_injection = self.read_nodestate(3)
injector_terminated = self.read_nodestate(4)
loaded_modules = self.read_nodestate(5)
if not injector_terminated or terminated:
if net_injection:
injector_return_value = injector.init(net_tuple, loaded_modules)
# The mess below handles the collect() loop that used to be in inject.py
current_network_size = len(net_tuple)
while not terminated or not injector_terminated:
network_size = len(net_tuple) # Keep this up to date
if terminated:
self.write_nodestate(nodeState, 4, True) # set injector_terminated = True
print("Injector terminated!!")
break
if current_network_size != network_size:
break # A new client connected, let's exit the injector.
if type(injector_return_value) == str:
""" Something went wrong sending to a given address. The injector
doesn't have proper error handling because it's a disposable thread
and a waste of lines, so we'll handle it here """
message_send_successful = (injector_return_value == Primitives.get_local_ip())
if message_send_successful and injector_return_value != "127.0.0.1":
faulty_conn_disconnect_msg = str("Server -> Attempting to "
"disconnect from faulty"
" connection: "
+ injector_return_value)
Primitives.log(faulty_conn_disconnect_msg, in_log_level="Warning")
# Find the address of the disconnected or otherwise faulty node.
sock = self.lookup_socket(injector_return_value)
Primitives.log(str("\tLooking up socket for "+injector_return_value),
in_log_level="Warning")
Primitives.log(str("\tFound socket: " + str(sock)), in_log_level="Info")
if sock:
# Be really verbose.
connection_to_disconnect = (sock, injector_return_value)
found_connection_msg = str("\tAs part of connection: " +
str(connection_to_disconnect))
Primitives.log(found_connection_msg, in_log_level="Info")
disconnect_attempt_msg = str("Trying to disconnect from: " +
str(connection_to_disconnect))
Primitives.log(disconnect_attempt_msg, in_log_level="Info")
self.disconnect(connection_to_disconnect)
else:
Primitives.log("Not disconnecting from localhost, dimwit.", in_log_level="Warning")
# The injector ran cleanly and we still have a multi-node network. Continue as normal.
if injector_return_value == 0 and len(net_tuple) >= 1: #
try:
Primitives.log(str(net_tuple), in_log_level="Debug")
Primitives.log("Permuting the network tuple... ", in_log_level="Info")
Primitives.log(str(net_tuple), in_log_level="Debug")
# Eww nested loops.
injector_return_value = injector.init(net_tuple, loaded_modules)
except BrokenPipeError:
pass # We'll get the address of the disconnected device through other methods shortly
# Something catastrophically wrong happened and for some reason, there are zero connections
# whatsoever. Stop the injector loop immediately so we can deal with the issue at hand.
elif len(net_tuple) == 0:
break
# The size of the net_tuple changed. Either we have remote connections, or a clean
# disconnect just occurred. Stop the loop so we can act accordingly.
elif len(net_tuple) > 1 or len(net_tuple) != current_network_size:
Primitives.log("Remote connections detected, stopping the network injector...",
in_log_level="Info")
break # We have remote connections...
else:
print("Network injector terminated!")
break
elif injector_terminated:
Primitives.log("Terminating the Network Injector", in_log_level="Info")
return
# Start listener in a new thread
Primitives.log("Starting a new listener thread", in_log_level="Info")
threading.Thread(target=listener, name='listener_thread', args=(connection,)).start()
# If applicable, start a new instance of the network injector, killing any other running ones.
terminated = self.read_nodestate(2)
net_injection = self.read_nodestate(3)
if net_injection and not terminated:
# nodestate[4] = injector_terminated
self.write_nodestate(nodeState, 4, True) # Kill any running network injector(s)
self.write_nodestate(nodeState, 4, False) # Reset the mutex preventing them from starting again
# Restart the network injector
threading.Thread(target=start_injector, name='injector_thread', args=()).start()
def initialize(self, port=3704, listening=True, method="socket", network_injection=False,
network_architecture="complete", default_log_level='Warning', modules=None):
if method == "socket":
global localhost
global log_level
global sub_node
global Primitives
log_level = default_log_level
Primitives = primitives.Primitives(sub_node, log_level)
if modules:
for item in modules:
import_str = "import " + item
loaded_modules = self.read_nodestate(5)
loaded_modules.append(item)
self.write_nodestate(nodeState, 5, loaded_modules)
exec(import_str)
# Set parameters and global variables from their default values
address_string = Primitives.get_local_ip()+":"+str(port) # e.x 10.1.10.3:3705
self.write_nodestate(nodeState, 3, network_injection)
Primitives.log("Initializing... ", in_log_level="Info")
Primitives.log(str("Server -> Binding server on: " + address_string + "..."),
in_log_level="Info")
# First, try to bind the server to (this address) port (port). If that doesn't work, exit cleanly.
try:
localhost.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
localhost.bind(('', port))
Primitives.log(str("Successfully bound server on port: " + str(port)), in_log_level="Info")
except OSError:
Primitives.log(str("Failed to bind server on " + address_string +
"; Please try again later."), in_log_level="Info")
self.stop()
if listening:
Primitives.log("Server -> Now Listening for incoming connections...", in_log_level="Info")
# Listen for incoming connections.
while listening:
try:
localhost.listen(5)
client, address_tuple = localhost.accept()
terminated = self.read_nodestate(2)
if not terminated:
address = address_tuple[0]
self.append(client, address)
connection = (client, address)
# Our localhost connected, do localhost stuff;
if address == Primitives.get_local_ip() or address == "127.0.0.1":
Primitives.log("Localhost has connected.", in_log_level="Info")
self.send(connection, str(no_prop+':'+"echo"), signing=False)
self.listen(connection)
Primitives.log("Listening on localhost...", in_log_level="Info")
# Make the client connect back to localhost if network_architecture=mesh
localhost_socket = self.lookup_socket("127.0.0.1")
localhost_connection = (localhost_socket, "127.0.0.1")
self.send(localhost_connection, no_prop + ":ConnectTo:" + address, signing=False)
# A remote client connected, handle them and send an echo, because why not?
else:
Primitives.log(str(address + " has connected."), in_log_level="Info")
Primitives.log(str("Listening on: "+address), in_log_level="Info")
self.listen(connection)
if network_architecture == "complete":
self.send(connection, no_prop+":echo", signing=False) # WIP
if network_architecture == "complete":
# In a 'complete' network, every node is connected to every other node for redundancy.
# Hence, when a new node connects, we broadcast it's address to the entire network so
# every other node can try to connect to it (i.e 'complete' the network).
self.broadcast(no_prop + ':ConnectTo:' + address)
elif network_architecture == "mesh":
# In mesh configuration, tell localhost client to connect back to the server
# of any remote client which connects to localhost server.
localhost_socket = self.lookup_socket("127.0.0.1")
localhost_connection = (localhost_socket, "127.0.0.1")
self.send(localhost_connection, no_prop + ":ConnectTo:" + address, signing=False)
elif terminated:
sys.exit(0)
except ConnectionResetError:
Primitives.log("Server -> localhost has disconnected", in_log_level="Warning")
# OSError will occur on Windows Systems we try to terminate. Handle that.
except OSError:
sys.exit(0)
|
BB.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This module contains the main interface to interact with BlackBoard.
Author: Adrián Revuelta Cuauhtli <[email protected]>
Workplace: Bio-Robotics Lab., UNAM <http://bio-robotics.fi-p-unam.mx>
'''
# STANDARD IMPORTS
import threading
import time
import types
import Queue
import sys
# PACKAGE IMPORTS
import shared_variables, parallel_senders
from messages import Message, Command, Response
from connection_manager import ConnectionManager
from command_parser import CommandParser
__version__ = '1.9.2'
ParallelSender = parallel_senders.ParallelSender
SharedVarTypes = shared_variables.SharedVarTypes
SubscriptionTypes = shared_variables.SubscriptionTypes
ReportTypes = shared_variables.ReportTypes
_initialized = False
_started = False
_startedLock = threading.Lock()
_ready = False
_readyLock = threading.Lock()
_subscriptionHandlersLock = threading.Lock()
_subscriptionHandlers = {}
_incomingMessages = Queue.Queue(20)
_receivedCommands = Queue.Queue(20)
_receivedResponses = {}
_responsesLock = threading.Lock()
_sentCommands = set([])
_commandsLock = threading.Lock()
def Initialize(port, functionMap={}, asyncHandler = None):
'''
Initializes BlackBoard with the corresponding parameters.
:param int port: The port through which BlackBoard will communicate with this module.
:param dictionary functionMap: A dictionary containing **key:value** pairs, where the *key* is the name of a command received (a string),
and the *value* is either a tuple containing a function as a first element and a boolean as a second element, or a function.
The function in both cases is the function that is going to execute the specified command and receives on object of type :class:`Command` (See :ref:`Creating a command handler <creating_a_command_handler>`).
The boolean value indicates whether the execution of that command should be synchronous (on the same thread) or asynchronous,
usually synchronous execution is preferred for fast commands that can answer almost immediately and asynchronous for commands that might take a little time.
When the value is only a function, by default the execution is synchronous. *functionMap* can also contain an entry with a string containing only an asterisk,
meaning that would be the handler in case no other handler is found for a specific command.
.. note::
Notice that although functionMap can include a wildcard handler and this might seem like the module could answer
anything, BlackBoard will only send commands that are registered under this module's configuration.
:param function asyncHandler: A function that would handle the response of commands when sent with the method :func:`Send`
instead of using :func:`SendAndWait`. This means the execution of a program that sends a command could continue
and an asynchronous handler would handle the response when one is received.
.. note::
Notice that the asyncHandler functionality could also be achieved using a :class:`ParallelSender` object,
but it has other implications.
'''
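    # Hedged usage sketch (port, module name and handler names below are illustrative):
    #   def say_hello(command):
    #       return Response.FromCommandObject(command, True, 'hello')
    #   BB.Initialize(2000, {'say_hello': say_hello, 'slow_task': (slow_task_handler, True)})
    #   BB.Start()
    #   BB.SetReady()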
global _executors, _connMan, _parser, _p, _initialized, _ready
_executors = { 'busy' : (lambda x: Response('busy'), False),
'ready' : (_isReady, False),
'alive' : (lambda x: Response('alive', True), False) }
for m in functionMap:
if isinstance(functionMap[m], types.FunctionType):
_executors[m] = (functionMap[m], False)
elif isinstance(functionMap[m], tuple):
_executors[m] = functionMap[m]
else:
            print 'Element in function map is neither a function nor a correct tuple: ' + repr(functionMap[m])
_connMan = ConnectionManager(port)
_parser = CommandParser(asyncHandler)
_p = threading.Thread(target=_MainThread)
_p.daemon = True
_initialized = True
def Start():
'''
Once pyRobotics is :func:`initialized <Initialize>`, you can start the communication with BlackBoard.
This will start the threads of the internal *ConnectionManager* and *CommandParser* classes to start listening for
a connection and start receiving and parsin messages.
If pyRobotics is not initialized it will only print a message saying "pyRobotics needs to be initialized before starting".
A similar message will show when trying to use some of this module's functions before calling this function.
.. todo::
Fix bug: sometimes when connection is established successfully a message saying pyRobotics has not been started is printed.
'''
    global _p, _connMan, _parser, _initialized, _started, _startedLock
if not _initialized:
print 'pyRobotics needs to be initialized before starting.'
return
_parser.Start()
_connMan.Start()
_p.start()
_startedLock.acquire()
_started = True
_startedLock.release()
def SetReady(val=True):
'''
Once pyRobotics is :func:`initialized <Initialize>` and :func:`started <Start>`, this flag should be set to true to
let BlackBoard know that the module is functioning correctly and ready to receive commands.
Even if this module does not receive any commands, this should be set to true.
'''
global _ready, _readyLock, _started, _startedLock
_startedLock.acquire()
started = _started
_startedLock.release()
if not started:
print 'pyRobotics has not been started.'
return False
_readyLock.acquire()
_ready = val
_readyLock.release()
def _isReady(c):
global _ready, _readyLock
_readyLock.acquire()
ready = _ready
_readyLock.release()
return Response('ready', ready)
def Wait():
'''
In case this module is only used to receive and respond commands, but is doing nothing while no command is received,
this will prevent the main thread (and therefore BlackBoard connection and commands execution) to terminate.
'''
global _started, _startedLock
_startedLock.acquire()
started = _started
_startedLock.release()
if not started:
print 'pyRobotics has not been started.'
return False
try:
while True:
time.sleep(300)
except (KeyboardInterrupt, SystemExit):
sys.exit()
def _MainThread():
global _receivedCommands, _executors
while True:
command = _receivedCommands.get()
key = command.name
if key not in _executors:
if '*' in _executors:
key = '*'
else:
print 'Executor not found for command: ' + command.name
return
func, async = _executors[key]
if async:
p = threading.Thread(target=_Execute, args=(func, command))
p.daemon = True
p.start()
else:
_Execute(func, command)
def _Execute(func, command):
try:
response = func(command)
except Exception as e:
print "Function '" + str(func) + "' crashed."
print 'ERROR: ' + str(e)
response = Response.FromCommandObject(command, False, command.params)
if not isinstance(response, Response):
print "Function '" + str(func) + "' did not return a Response object."
response = Response.FromCommandObject(command, False, command.params)
resp = Response.FromCommandObject(command, response.successful, response.params)
Send(resp)
def Send(message):
'''
Sends a command WITHOUT waiting for an answer.
:param Command message: Message to be sent, must be an instance of the Command class.
:return: ``True`` if the message was sent successfully, ``False`` otherwise.
'''
global _connMan, _started, _startedLock
_startedLock.acquire()
started = _started
_startedLock.release()
if not started:
print 'pyRobotics has not been started.'
return False
if not isinstance(message, Message):
print "Message to be sent should be a derived class of pyrobotics.messages.Message Class. Message was not sent."
return False
for _ in range(3):
if _connMan.Send(message):
return True
return False
def SendAndWait(command, timeout=300000, attempts = 1):
    '''
    Sends a command and waits for the answer. This blocks the execution of the calling thread.
    :param Command command: Message to be sent, must be an instance of the Command class.
    :param int timeout: (Default 300000) How much time (in milliseconds) to wait for a response before trying again or aborting.
    :param int attempts: (Default 1) How many attempts to send the command if no response is received after timeout.
    If attempts is 0, it will keep trying indefinitely. (Not recommended)
    :return: A :class:`Response` object if the message was sent successfully and a response was received before the timeout occurred, ``None`` otherwise.
    '''
    global _commandsLock, _sentCommands, _responsesLock, _receivedResponses, _started, _startedLock
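    # Hedged usage sketch (the command name and parameters are illustrative):
    #   resp = SendAndWait(Command('goto', 'x=1.0 y=2.0'), timeout=5000, attempts=2)
    #   if resp is not None and resp.successful:
    #       print 'goto succeeded'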
_startedLock.acquire()
started = _started
_startedLock.release()
if not started:
print 'pyRobotics has not been started.'
return None
if not isinstance(command, Command):
print "Message should be an instance of class Command. Message not sent."
return None
_commandsLock.acquire()
_sentCommands.add(command)
_commandsLock.release()
currentAttempt = 0
timeout = timeout/1000.0
response = None
while not response and currentAttempt < attempts:
Send(command)
newTimeout = time.time() + timeout
currentAttempt += 1
while time.time() < newTimeout:
_responsesLock.acquire()
if command in _receivedResponses:
response = _receivedResponses.pop(command)
_responsesLock.release()
if response:
break
time.sleep(0.1)
_commandsLock.acquire()
_sentCommands.remove(command)
_commandsLock.release()
return response
def ReadSharedVar(name):
'''
Reads the value of a Shared Variable from the BlackBoard.
:param string name: The name of the Shared Variable.
:return: A :class:`SharedVar` object if the request was successful, ``False`` if pyRobotics has not been started, ``None`` otherwise.
'''
global _started, _startedLock
_startedLock.acquire()
started = _started
_startedLock.release()
if not started:
print 'pyRobotics has not been started.'
return False
return shared_variables._ReadSharedVar(name)
def CreateSharedVar(sharedVarType, name):
'''
Creates a Shared Variable in BlackBoard.
:param enum sharedVarType: The type of the shared variable, it is one of the constants in :class:`SharedVarTypes` pseudo-enum.
:param string name: The name of the shared variable to be created.
:return: ``True`` if creation was successful, ``False`` otherwise.
'''
global _started, _startedLock
_startedLock.acquire()
started = _started
_startedLock.release()
if not started:
print 'pyRobotics has not been started.'
return False
return shared_variables._CreateSharedVar(sharedVarType, name)
def WriteSharedVar(sharedVarType, name, data):
'''
Writes content to a Shared Variable in BlackBoard.
:param enum sharedVarType: The type of the shared variable, it is one of the constants in :class:`SharedVarTypes` pseudo-enum.
:param string name: The name of the shared variable to write to.
:param var data: The data to be written, the type must match the shared variable's type.
:return: ``True`` if shared variable was succesfully written to, ``False`` otherwise.
'''
global _started, _startedLock
_startedLock.acquire()
started = _started
_startedLock.release()
if not started:
print 'pyRobotics has not been started.'
return False
return shared_variables._WriteSharedVar(sharedVarType, name, data)
def SubscribeToSharedVar(name, handler, subscriptionType=SubscriptionTypes.WRITE_OTHERS, reportType = ReportTypes.CONTENT):
'''
Subscribes to a Shared Variable in BlackBoard.
When a module subscribes to a shared variable, it gets notifications when someone writes to it.
:param string name: The name of the shared variable to subscribe to.
:param function handler: A function that will be the handler for this shared variables notification. (See :ref:`Creating a subscription handler <creating_a_subscription_handler>`)
:param enum subscriptionType: The type of subscription, it is one of the constants in :class:`SubscriptionTypes` pseudo-enum.
:param enum reportType: The type of report to receive when someone writes to it, it is one of the constants in :class:`ReportTypes` pseudo-enum.
:return: ``True`` if subscription was successful, ``False`` otherwise.
'''
global _subscriptionHandlersLock, _subscriptionHandlers, _started, _startedLock
_startedLock.acquire()
started = _started
_startedLock.release()
if not started:
print 'pyRobotics has not been started.'
return False
if not shared_variables._SubscribeToSharedVar(name, subscriptionType, reportType):
return False
_subscriptionHandlersLock.acquire()
_subscriptionHandlers[name] = handler
_subscriptionHandlersLock.release()
return True
|
test_INLClient_live.py
|
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
import os
from django.test import override_settings
from mock import patch
from client.JobGetter import JobGetter
from client import settings, BaseClient
import subprocess
from client.tests import LiveClientTester, utils
import tempfile
import threading
import time
from ci import views
from ci.tests import utils as test_utils
@override_settings(INSTALLED_GITSERVERS=[test_utils.github_config()])
class Tests(LiveClientTester.LiveClientTester):
def create_client(self, build_root):
c = utils.create_inl_client()
c.set_environment('BUILD_ROOT', build_root)
c.client_info["update_step_time"] = 1
c.client_info["ssl_cert"] = False # not needed but will get another line of coverage
c.client_info["server"] = self.live_server_url
c.client_info["servers"] = [self.live_server_url]
return c
def create_job(self, client, recipes_dir, name, sleep=1, n_steps=3, extra_script=''):
job = utils.create_client_job(recipes_dir, name=name, sleep=sleep, n_steps=n_steps, extra_script=extra_script)
settings.SERVERS = [(self.live_server_url, job.event.build_user.build_key, False)]
if job.config.name not in client.get_client_info('build_configs'):
client.add_config(job.config.name)
if job.config.name not in client.get_client_info('config_modules') or 'null' not in client.get_client_info('config_modules')[job.config.name]:
client.add_config_module(job.config.name, 'null')
client.client_info["build_key"] = job.recipe.build_user.build_key
return job
def create_client_and_job(self, recipes_dir, name, sleep=1):
c = self.create_client("/foo/bar")
c.client_info["single_shot"] = True
job = self.create_job(c, recipes_dir, name, sleep=sleep)
return c, job
def test_run_success(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "RunSuccess", sleep=2)
self.set_counts()
c.run(exit_if=lambda client: True)
self.compare_counts(num_clients=1, num_events_completed=1, num_jobs_completed=1, active_branches=1)
utils.check_complete_job(self, job, c)
def test_run_graceful(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "Graceful", sleep=2)
self.set_counts()
c.client_info["poll"] = 1
# graceful signal, should complete
script = "sleep 3 && kill -USR2 %s" % os.getpid()
proc = subprocess.Popen(script, shell=True, executable="/bin/bash", stdout=subprocess.PIPE)
c.run()
proc.wait()
self.compare_counts(num_clients=1, num_events_completed=1, num_jobs_completed=1, active_branches=1)
utils.check_complete_job(self, job, c)
self.assertEqual(c.graceful_signal.triggered, True)
self.assertEqual(c.cancel_signal.triggered, False)
def test_run_cancel(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "Cancel", sleep=4)
self.set_counts()
c.client_info["poll"] = 1
# cancel signal, should stop
script = "sleep 3 && kill -USR1 %s" % os.getpid()
proc = subprocess.Popen(script, shell=True, executable="/bin/bash", stdout=subprocess.PIPE)
c.run()
proc.wait()
self.compare_counts(num_clients=1, canceled=1, num_events_completed=1, num_jobs_completed=1, active_branches=1, events_canceled=1)
self.assertEqual(c.cancel_signal.triggered, True)
self.assertEqual(c.graceful_signal.triggered, False)
utils.check_canceled_job(self, job, c)
def test_run_job_cancel(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "JobCancel", sleep=60)
self.set_counts()
# cancel response, should cancel the job
thread = threading.Thread(target=c.run, args=(lambda client: True,))
thread.start()
time.sleep(10)
job.refresh_from_db()
views.set_job_canceled(job)
thread.join()
self.compare_counts(num_clients=1, canceled=1, num_events_completed=1, num_jobs_completed=1, active_branches=1, events_canceled=1)
self.assertEqual(c.cancel_signal.triggered, False)
self.assertEqual(c.graceful_signal.triggered, False)
utils.check_canceled_job(self, job, c)
def test_run_job_invalidated_basic(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "JobInvalidated", sleep=40)
# stop response, should stop the job
self.set_counts()
thread = threading.Thread(target=c.run, args=(lambda client: True,))
thread.start()
start_time = time.time()
time.sleep(4)
job.refresh_from_db()
job.set_invalidated("Test invalidation", check_ready=True)
thread.join()
end_time = time.time()
self.assertGreater(15, end_time-start_time)
self.compare_counts(num_clients=1, invalidated=1, num_changelog=1)
utils.check_stopped_job(self, job)
def test_run_job_invalidated_nested_bash(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "JobInvalidated", sleep=40)
job.delete()
job = utils.create_job_with_nested_bash(recipe_dir, name="JobWithNestedBash", sleep=40)
# stop response, should stop the job
self.set_counts()
thread = threading.Thread(target=c.run, args=(lambda client: True,))
start_time = time.time()
thread.start()
time.sleep(4)
job.refresh_from_db()
job.set_invalidated("Test invalidation", check_ready=True)
thread.join()
end_time = time.time()
self.assertGreater(15, end_time-start_time)
self.compare_counts(num_clients=1, invalidated=1, num_changelog=1)
utils.check_stopped_job(self, job)
@patch.object(JobGetter, 'find_job')
def test_exception(self, mock_getter):
with test_utils.RecipeDir() as recipe_dir:
# check exception handler
mock_getter.side_effect = Exception("oh no!")
c, job = self.create_client_and_job(recipe_dir, "JobStop", sleep=4)
self.set_counts()
c.run(exit_if=lambda client: True)
self.compare_counts()
def test_check_server_no_job(self):
with test_utils.RecipeDir() as recipe_dir:
# check no jobs
c, job = self.create_client_and_job(recipe_dir, "JobStop", sleep=4)
job.complete = True
job.save()
self.set_counts()
c.check_server(settings.SERVERS[0])
self.compare_counts(num_clients=1)
@patch.object(JobGetter, 'find_job')
def test_runner_error(self, mock_getter):
with test_utils.RecipeDir() as recipe_dir:
mock_getter.return_value = None
c, job = self.create_client_and_job(recipe_dir, "JobError")
self.set_counts()
c.runner_error = True
c.run()
self.compare_counts()
def test_exit_if_exception(self):
c = self.create_client("/foo/bar")
with self.assertRaises(BaseClient.ClientException):
c.run(exit_if="foo")
with self.assertRaises(BaseClient.ClientException):
c.run(exit_if=lambda: "foo")
with self.assertRaises(BaseClient.ClientException):
c.run(exit_if=lambda client: "foo")
def test_manage_build_root(self):
with test_utils.RecipeDir() as recipe_dir:
temp_dir = tempfile.TemporaryDirectory()
build_root = temp_dir.name + "/build_root"
self.assertEqual(os.path.isdir(build_root), False)
os.mkdir(build_root)
self.assertEqual(os.path.isdir(build_root), True)
manage_build_root_before = settings.MANAGE_BUILD_ROOT
settings.MANAGE_BUILD_ROOT = True
c = self.create_client(build_root)
settings.MANAGE_BUILD_ROOT = manage_build_root_before
self.assertEqual(c.get_build_root(), build_root)
self.assertEqual(c.get_client_info('manage_build_root'), True)
self.assertEqual(c.build_root_exists(), True)
c.check_build_root()
self.assertEqual(c.build_root_exists(), False)
c.create_build_root()
self.assertEqual(c.build_root_exists(), True)
extra_script = 'if [ -d "$BUILD_ROOT" ]; then\n'
extra_script += ' if [ ! -n "$(ls -A "$BUILD_ROOT")" ]; then\n'
extra_script += ' echo BUILD_ROOT_EXISTS_EMPTY\n'
extra_script += ' echo foo > $BUILD_ROOT/build_root_test || exit 1\n'
extra_script += ' fi\n'
extra_script += 'fi\n'
jobs = []
jobs.append(self.create_job(c, recipe_dir, "ManageBuildRoot1", n_steps=1, sleep=2, extra_script=extra_script))
jobs.append(self.create_job(c, recipe_dir, "ManageBuildRoot2", n_steps=1, sleep=2, extra_script=extra_script))
jobs.append(self.create_job(c, recipe_dir, "ManageBuildRoot3", n_steps=1, sleep=2, extra_script=extra_script))
self.set_counts()
c.client_info["poll"] = 1
def exit_create_build_root(client):
self.assertEqual(client.build_root_exists(), False)
client.create_build_root()
self.assertEqual(client.build_root_exists(), True)
return client.get_client_info('jobs_ran') == 3
c.run(exit_if=exit_create_build_root)
self.assertEqual(c.build_root_exists(), False)
self.compare_counts(num_clients=1, num_events_completed=1, num_jobs_completed=3, active_branches=1)
for job in jobs:
utils.check_complete_job(self, job, c, n_steps=1, extra_step_msg='BUILD_ROOT_EXISTS_EMPTY\n')
temp_dir.cleanup()
def test_manage_build_root_failure(self):
manage_build_root_before = settings.MANAGE_BUILD_ROOT
settings.MANAGE_BUILD_ROOT = True
with self.assertRaises(FileNotFoundError):
c = self.create_client("/foo/bar")
self.assertEqual(c.get_build_root(), '/foo/bar')
self.assertEqual(c.build_root_exists(), False)
c.check_build_root()
settings.MANAGE_BUILD_ROOT = manage_build_root_before
def test_no_modules(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "NoModules", sleep=2)
c.client_info["config_modules"] = {}
c.run(exit_if=lambda client: True)
def test_deprecated_environment(self):
with test_utils.RecipeDir() as recipe_dir:
env_before = settings.ENVIRONMENT
settings.ENVIRONMENT = { 'FOO': 'bar' }
c = self.create_client("/foo/bar")
self.assertNotIn('FOO', c.get_environment())
extra_script = 'if [ "$FOO" == "bar" ]; then\n'
extra_script += ' echo "FOO=bar"\n'
extra_script += 'fi\n'
job = self.create_job(c, recipe_dir, "DeprecatedEnvironment", n_steps=1, sleep=2, extra_script=extra_script)
self.set_counts()
c.run(exit_if=lambda client: True)
self.assertEqual('bar', c.get_environment('FOO'))
self.compare_counts(num_clients=1, num_events_completed=1, num_jobs_completed=1, active_branches=1)
utils.check_complete_job(self, job, c, n_steps=1, extra_step_msg='FOO=bar\n')
settings.ENVIRONMENT = env_before
def test_deprecated_config_modules(self):
config_modules_before = settings.CONFIG_MODULES
settings.CONFIG_MODULES = { 'foo': ['null'] }
c = self.create_client("/foo/bar")
self.assertNotIn('foo', c.get_client_info('build_configs'))
self.assertNotIn('foo', c.get_client_info('config_modules'))
c.run(exit_if=lambda client: True)
self.assertIn('foo', c.get_client_info('build_configs'))
self.assertIn('foo', c.get_client_info('config_modules'))
self.assertEqual(['null'], c.get_client_info('config_modules')['foo'])
settings.CONFIG_MODULES = config_modules_before
|
astrometry.net.script.py
|
#============================================================
# astrometry.net script making program for ONE target
# 2018/08/29 G.Paek revision for skipping astrocrappy
# JUST RUN AND WRITE INPUT & REF IMAGE
# 2019.05.08 GREGORY S.H. PAEK
#============================================================
import numpy as np
import os, sys, glob
from astropy.io import fits
from multiprocessing import Process, Pool
import multiprocessing as mp
import time
#============================================================
def astrometry(imlist, pixelscale):
scale_low = str( float(pixelscale) - (float(pixelscale)*0.1) )
scale_high = str( float(pixelscale) + (float(pixelscale)*0.1) )
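    # e.g. pixelscale = '0.4' gives bounds of roughly '0.36' and '0.44' arcsec/pix
    # (plus/minus 10% of the nominal scale; the exact strings depend on float rounding).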
n = 1
for inim in imlist:
com = 'solve-field '+inim+' --scale-unit arcsecperpix --scale-low '+scale_low+' --scale-high '+scale_high+ ' --no-plots --new-fits a'+inim+' --overwrite --temp-dir ./ '+ '--cpulimit 90 --use-sextractor\n'
print('['+str(n)+'/'+str(len(imlist))+']'); n += 1
print(com)
os.system(com)
os.system('rm tmp*')
os.system('rm *.axy *.corr *.xyls *.match *.rdls *.solved *.wcs *axy *.corr')
#-------------------------------------------------------------
os.system('ls *.fits *.fit')
pattern = raw_input('Images to process (Calib*.fits)\t: ')
if pattern == '':
    pattern = 'Calib*.fits'
imlist = glob.glob(pattern); imlist.sort()
pixelscale = raw_input('Pixel scale of Images\t: ')
if pixelscale == '' :
pixelscale = '0'
#------------------------------------------------------------
# MULTI PROCESSING
#------------------------------------------------------------
starttime = time.time()
'''
if __name__ == '__main__':
jobs = []
p = mp.Process(target=astrometry, args=(imlist, pixelscale))
jobs.append(p)
p.start()
'''
astrometry(imlist, pixelscale)
deltime = time.time() - starttime
print('All PROCESS IS DONE.\t('+str(round(deltime, 1))+' sec)')
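#------------------------------------------------------------
# Hedged sketch (not in the original script): the commented-out block above
# hints at multiprocessing. One possible way to spread the image list over a
# Pool of workers; the worker count of 4 is an illustrative assumption.
#
# if __name__ == '__main__':
# 	ncpu = 4
# 	chunks = [imlist[i::ncpu] for i in range(ncpu)]
# 	pool = Pool(ncpu)
# 	for chunk in chunks:
# 		pool.apply_async(astrometry, (chunk, pixelscale))
# 	pool.close(); pool.join()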
|
winInputHook.py
|
#winInputHook.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2008 NVDA Contributors <http://www.nvda-project.org/>
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import threading
import comtypes.client
import time
from ctypes import *
from ctypes.wintypes import *
from win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED
import watchdog
class KBDLLHOOKSTRUCT(Structure):
_fields_=[
('vkCode',DWORD),
('scanCode',DWORD),
('flags',DWORD),
('time',DWORD),
('dwExtraInfo',DWORD),
]
class MSLLHOOKSTRUCT(Structure):
_fields_=[
('pt',POINT),
('mouseData',DWORD),
('flags',DWORD),
('time',DWORD),
('dwExtraInfo',DWORD),
]
keyDownCallback=None
keyUpCallback=None
mouseCallback=None
@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)
def keyboardHook(code,wParam,lParam):
if watchdog.isAttemptingRecovery or code!=HC_ACTION:
return windll.user32.CallNextHookEx(0,code,wParam,lParam)
kbd=KBDLLHOOKSTRUCT.from_address(lParam)
if keyUpCallback and kbd.flags&LLKHF_UP:
if not keyUpCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):
return 1
elif keyDownCallback:
if not keyDownCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):
return 1
return windll.user32.CallNextHookEx(0,code,wParam,lParam)
@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)
def mouseHook(code,wParam,lParam):
if watchdog.isAttemptingRecovery or code!=HC_ACTION:
return windll.user32.CallNextHookEx(0,code,wParam,lParam)
msll=MSLLHOOKSTRUCT.from_address(lParam)
if mouseCallback:
if not mouseCallback(wParam,msll.pt.x,msll.pt.y,msll.flags&LLMHF_INJECTED):
return 1
return windll.user32.CallNextHookEx(0,code,wParam,lParam)
hookThread=None
hookThreadRefCount=0
def hookThreadFunc():
keyHookID=windll.user32.SetWindowsHookExW(WH_KEYBOARD_LL,keyboardHook,windll.kernel32.GetModuleHandleW(None),0)
if keyHookID==0:
raise OSError("Could not register keyboard hook")
mouseHookID=windll.user32.SetWindowsHookExW(WH_MOUSE_LL,mouseHook,windll.kernel32.GetModuleHandleW(None),0)
if mouseHookID==0:
raise OSError("Could not register mouse hook")
msg=MSG()
while windll.user32.GetMessageW(byref(msg),None,0,0):
pass
if windll.user32.UnhookWindowsHookEx(keyHookID)==0:
raise OSError("could not unregister key hook %s"%keyHookID)
if windll.user32.UnhookWindowsHookEx(mouseHookID)==0:
raise OSError("could not unregister mouse hook %s"%mouseHookID)
def initialize():
global hookThread, hookThreadRefCount
hookThreadRefCount+=1
if hookThreadRefCount==1:
hookThread=threading.Thread(target=hookThreadFunc)
hookThread.start()
def setCallbacks(keyUp=None,keyDown=None,mouse=None):
global keyUpCallback, keyDownCallback, mouseCallback
if keyUp:
keyUpCallback=keyUp
if keyDown:
keyDownCallback=keyDown
if mouse:
mouseCallback=mouse
def terminate():
global hookThread, hookThreadRefCount
if not hookThread:
raise RuntimeError("winInputHook not running")
hookThreadRefCount-=1
if hookThreadRefCount==0:
windll.user32.PostThreadMessageW(hookThread.ident,WM_QUIT,0,0)
hookThread.join()
hookThread=None
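# Hedged usage sketch (not part of NVDA): the callback name below is an
# illustrative assumption. A callback returns a truthy value to let the event
# pass through; returning a falsy value swallows it (see keyboardHook above).
#
# def onKeyDown(vkCode, scanCode, extended, injected):
# 	print(vkCode, scanCode, extended, injected)
# 	return True
# setCallbacks(keyDown=onKeyDown)
# initialize()
# ... run a message loop / the application ...
# terminate()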
|
FileScan.py
|
import datetime
import subprocess
import threading
from tkinter import Listbox, messagebox, ttk
from tkinter.filedialog import askopenfilename, asksaveasfilename
import xlwt
from PIL import Image, ImageTk
TITLE_FONT = ("Helvetica", 16, "bold")
FALSE = False
function = 'function'
# images
img_IPtest = Image.open('./Images/IPtest_img.png')
img_ALL_IPimg = Image.open('./Images/ALL_IP_img.png')
img_infile = Image.open('./Images/inFile_img.png')
img_outFile = Image.open('./Images/outFile_img.png')
img_go = Image.open('./Images/go_img.png')
img_one_IPtes = Image.open('./Images/one_IPtest_img.png')
# Define the image sizes
IPtest_image = img_IPtest.resize((60, 60), Image.ANTIALIAS)
ALL_IPimg_image = img_ALL_IPimg.resize((60, 60), Image.ANTIALIAS)
one_IPtest_image = img_one_IPtes.resize((60, 60), Image.ANTIALIAS)
# Import / export button icons
infile_image = img_infile.resize((25, 25), Image.ANTIALIAS)
outFile_image = img_outFile.resize((25, 25), Image.ANTIALIAS)
go_image = img_go.resize((25, 25), Image.ANTIALIAS)
class ALL_IPtest(ttk.Frame):
"""
任意IP地址扫描
扫描结显示到窗口
也可以选择导出到文本文件
"""
def __init__(self, parent, mainframe):
ttk.Frame.__init__(self, parent)
self.mainframe = mainframe
        # Load the button images
self.ALLIP_img = ImageTk.PhotoImage(ALL_IPimg_image)
self.infile_img = ImageTk.PhotoImage(infile_image)
self.outFile_img = ImageTk.PhotoImage(outFile_image)
self.go_img = ImageTk.PhotoImage(go_image)
self.IPtest = ttk.Label(self, text='自定义扫描',
image=self.ALLIP_img, compound='left', font=TITLE_FONT, foreground='#1296db')
self.Get_IPtxt = ttk.Button(
            self, text="导入IP文件", image=self.infile_img, compound='left', command=lambda: self.start_ping())
self.Go_Scanning = ttk.Button(
self, text="开始扫描", image=self.go_img, compound='left')
self.Out_ScanningTxt = ttk.Button(
self, text="导出结果", image=self.outFile_img, compound='left', command=lambda: self.save_view())
self.Clean_ScanningTxt = ttk.Button(
self, text="清空", command=lambda: self.cleane_view())
self.TestView = ttk.Label(
self, text='扫描结果:', font=TITLE_FONT, foreground='#1296db')
self.ping_test = []
# 结果显示
VERTICAL = "vertical"
self.Scanning_L = Listbox(self, height=20, width=100)
self.ScanViews = ttk.Scrollbar(
self, orient=VERTICAL, command=self.Scanning_L.yview)
self.Scanning_L['yscrollcommand'] = self.ScanViews.set
ttk.Sizegrip().grid(column=2, row=4, sticky="se")
self.ScanViews.grid(column=21, row=3, sticky="ns")
self.Scanning_L.grid(column=1, row=3, sticky="nwes",
columnspan=20, padx=5, pady=5)
self.IPtest.grid(column=0, row=0, sticky="nwes", padx=5, pady=5)
self.Get_IPtxt.grid(column=1, row=1, sticky="nwes",
columnspan=1, rowspan=1, padx=5, pady=5)
self.Go_Scanning.grid(column=2, row=1, sticky="nwes",
columnspan=1, rowspan=1, padx=5, pady=5)
self.Out_ScanningTxt.grid(
column=20, row=20, sticky="nwes", columnspan=1, rowspan=1, padx=5, pady=5)
self.Clean_ScanningTxt.grid(
column=1, row=20, sticky="nwes", columnspan=1, rowspan=1, padx=5, pady=5)
self.TestView.grid(column=1, row=2, sticky="nwes", padx=5, pady=5)
    # Read the IP addresses
def check_file(self):
"""
askopenfilename获取IP地址文件
"""
self.open_filename = askopenfilename(
title='打开文件', filetypes=[('All Files', '*')])
with open(self.open_filename, 'r') as f:
self.startip = f.readlines()
return(self.startip)
    # Process the IPs
def start_ping(self):
"""
启动多线程
检查IP地址合法性
"""
get_ALLip = self.check_file()
pthread_list = []
self.Scanning_L.insert(
'end', '时间 IP地址 测试次数 通信状态')
for line in get_ALLip:
if len(line.strip()):
ip = line.strip('\n')
                # Start the test
pthread_list.append(threading.Thread(
target=self.get_ping_result, args=(ip,)))
for item in pthread_list:
item.setDaemon(True)
item.start()
self.ping_test = [['时间', 'IP地址', 'Ping次数', '通信情况']]
def get_ping_result(self, ip):
"""
检查对应的IP是否被占用
"""
cmd_str = "ping {0} -n 4 -w 600".format(ip)
        DETACHED_PROCESS = 0x00000008  # do not create a cmd window
time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
try:
subprocess.run(cmd_str, creationflags=DETACHED_PROCESS,
                           check=True)  # Windows only
except subprocess.CalledProcessError as err:
self.Scanning_L.insert(
'end', '%s %s 4 通信失败' % (str(time_now), ip))
self.ping_test.append([time_now, ip, 4, '通信失败'])
else:
self.ping_test.append([time_now, ip, 4, '通信正常'])
self.Scanning_L.insert(
'end', '%s %s 4 通信正常' % (str(time_now), ip))
self.Scanning_L.update()
def cleane_view(self):
self.Scanning_L.delete('0', 'end')
def save_view(self):
        PingTest = xlwt.Workbook()  # create a new Excel workbook
        sheet = PingTest.add_sheet('Ping测试数据结果')  # add a worksheet
        row = 0  # row index
        for stu in self.ping_test:
            col = 0  # column index
            for s in stu:  # loop over the values of each result row, one per column
                sheet.write(row, col, s)
                col += 1
            row += 1
        PingTest.save('Ping测试数据结果.xls')  # save to the current directory
messagebox.showinfo('提示', '数据已导出到程序可执行文件目录下的(Ping测试数据结果.xls)文件中!')
if __name__ == "__main__":
    # ALL_IPtest needs a parent widget and a mainframe reference; build a
    # minimal Tk root so the frame can run standalone.
    import tkinter
    root = tkinter.Tk()
    app = ALL_IPtest(root, None)
    app.pack(fill="both", expand=True)
    root.mainloop()
|
03_motors.py
|
# -*- coding: utf-8 -*-
from threading import Thread
import WonderPy.core.wwMain
from WonderPy.core.wwConstants import WWRobotConstants
"""
This example shows connecting to the robot and sending commands each time sensors are received.
This builds on the "01_hello_world.py" example.
"""
class MyClass(object):
def on_sensors(self, robot):
"""
Called approximately 30 times per second - each time sensor data is received from the robot.
This method is optional.
Do not block in here !
This means only call the stage_foo() flavor of robot commands, and not the do_foo() versions.
"""
def on_connect(self, robot):
"""
        Called when we connect to a robot. This method is optional. Do not block in this method!
"""
print("Starting a thread for %s." % (robot.name))
Thread(target=self.thread_mover, args=(robot,)).start()
def thread_mover(self, robot):
# turn the robot this way and that
if not robot.has_ability(WWRobotConstants.WWRobotAbilities.BODY_MOVE, True):
# it doesn't do any harm to send drive commands to a robot with no wheels,
# but it doesn't help either.
print(u"%s cannot drive! try a different example." % (robot.name))
return
while True:
# call convenience function to wait for button press
print(u"%s waiting for button press." % (robot.name))
robot.block_until_button_main_press_and_release()
print(u"%s driving forward 20cm at 10cm/s." % (robot.name))
robot.cmds.body.do_forward(20, 10)
print(u"%s turning head to 120° left." % (robot.name))
robot.cmds.head.stage_pan_angle(120)
print(u"%s turning body around to the left." % (robot.name))
robot.cmds.body.do_turn(180, 200)
print(u"%s turning head back to 0°." % (robot.name))
robot.cmds.head.stage_pan_angle(0)
# now repeat: drive back. this time we'll turn the head / body the other way tho.
print(u"%s driving forward 20cm at 10cm/s." % (robot.name))
robot.cmds.body.do_forward(20, 10)
print(u"%s turning head to 120° right." % (robot.name))
robot.cmds.head.stage_pan_angle(-120)
print(u"%s turning body around to the right." % (robot.name))
robot.cmds.body.do_turn(-180, 200)
print(u"%s turning head back to 0°." % (robot.name))
robot.cmds.head.stage_pan_angle(0)
if __name__ == "__main__":
WonderPy.core.wwMain.start(MyClass())
|
launch.py
|
# Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from collections import deque
from subprocess import DEVNULL, PIPE, Popen
from threading import Lock, Thread
class BackgroundProcessCrash(Exception):
def __init__(self, message: str, details: str) -> None:
super().__init__(message)
self.details = details
class _Logger:
"""Logger that optionally captures what is logged"""
def __init__(
self,
write: typing.Callable[[str], None],
do_log: bool,
do_capture: bool,
max_capture: int,
):
self.write = write
self.do_log = do_log
self.do_capture = do_capture
self.finished = Lock()
if max_capture < 0:
capture = [] # type: typing.MutableSequence[str]
else:
capture = deque(maxlen=max_capture)
self.capture = capture # type: typing.MutableSequence[str]
self.finished.acquire()
def log(self, line: str) -> None:
if self.do_log:
self.write(line)
if self.do_capture:
self.capture.append(line)
def finish(self) -> None:
self.finished.release()
def get_captured(self) -> str:
self.finished.acquire() # Block until finish is called
return "".join(self.capture).strip()
def _launch_command(
args: typing.List[str],
out_logger: _Logger,
err_logger: _Logger,
done: typing.Optional[typing.Callable[[Popen], None]] = None,
**kwargs: typing.Any
) -> Popen:
"""
Launch subprocess with args, kwargs.
Log stdout and stderr by calling respective callbacks.
"""
def pump_stream(logger: _Logger, stream: typing.Iterable[str]) -> None:
"""Pump the stream"""
for line in stream:
logger.log(line)
logger.finish()
def joiner() -> None:
"""Wait for streams to finish, then call done callback"""
for th in threads:
th.join()
if done:
done(process)
kwargs = kwargs.copy()
in_data = kwargs.get("input")
if "input" in kwargs:
del kwargs["input"]
assert kwargs.get("stdin") is None, kwargs["stdin"]
kwargs["stdin"] = PIPE
elif "stdin" not in kwargs:
kwargs["stdin"] = DEVNULL
kwargs.setdefault("stdout", PIPE)
kwargs.setdefault("stderr", PIPE)
kwargs["universal_newlines"] = True # Text streams, not byte streams
process = Popen(args, **kwargs)
threads = []
if process.stdout:
thread = Thread(
target=pump_stream, args=(out_logger, process.stdout), daemon=True
)
thread.start()
threads.append(thread)
if process.stderr:
thread = Thread(
target=pump_stream, args=(err_logger, process.stderr), daemon=True
)
thread.start()
threads.append(thread)
if done and threads:
Thread(target=joiner, daemon=True).start()
if in_data:
process.stdin.write(str(in_data, "utf-8"))
process.stdin.close()
return process
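# Hedged usage sketch (not part of the original module; _launch_command and
# _Logger are module-internal, and the command shown is illustrative only,
# assuming "import sys" is available at the call site):
#
#     out_log = _Logger(sys.stdout.write, do_log=True, do_capture=True, max_capture=-1)
#     err_log = _Logger(sys.stderr.write, do_log=True, do_capture=True, max_capture=-1)
#     proc = _launch_command(["echo", "hello"], out_log, err_log)
#     proc.wait()
#     print(out_log.get_captured())  # blocks until the stdout pump thread finishes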
|
controller.py
|
# controller.py
# This script launches instances and runs the avrora benchmark on them automatically.
# To use it, set your ec2_access_id and ec2_access_key in vm.py, and set the number
# of instances you want to run and the traces, lb, es filenames in the configuration field below.
# The script records the execution time of each job and the usage of spot and OD instances in
# the corresponding txt files.
import re
import vm
import os
import sys
import math
import datetime
import logging
import json
import socket
import boto.ec2
import threading
import pdb
from time import sleep
from random import randint
logging.basicConfig(filename='spot_fail.log', format='%(asctime)s: %(message)s')
LOW_WEIGHT = 256
HIGH_WEIGHT = 1024
RHO = 1
K = 4
EC2_AMI = 'ami-75636f1f'
BID = 0.266
OD_price = 0.266
HOUR_INTERVAL = 300
MINUTE_INTERVAL = 5
NUM_CORE = 4
LONG_JOB_LENGTH = 100
JOB_LENGTH_SCALAR = 8
TRACE_LENGTH = 1440
def send_to_server(server_ip, data, port=10086):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((server_ip, port))
res_data = []
try:
sock.sendall(data)
while True:
res = sock.recv(1024)
if res:
res_data.append(res)
else:
break
finally:
sock.close()
return ''.join(res_data)
def isServerAlive(server_ip, active_server_list):
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((server_ip, 22))
return True
except socket.error as e:
return False
"""
if not server_ip:
return False
for server in active_server_list:
if server.ip == server_ip:
return True
return False
def launch_vm(v):
v.launch_instance()
def get_vm(num_spot, num_backup):
spot_list = []
backup_list = []
t = []
for i in range(num_spot):
new_vm = vm.VM(EC2_AMI,
'ec2-sample-key',
'ec2-sample-key.pem',
'ubuntu',
bidding = 1,
instance_type = 'c4.xlarge')
spot_list.append(new_vm)
t.append(threading.Thread(target = launch_vm, args = (new_vm,)))
for i in range(num_backup):
new_vm = vm.VM(EC2_AMI,
'ec2-sample-key',
'ec2-sample-key.pem',
'ubuntu',
bidding = 1,
instance_type = 'c4.xlarge')
backup_list.append(new_vm)
t.append(threading.Thread(target = launch_vm, args = (new_vm,)))
for thread in t:
thread.start()
for thread in t:
thread.join()
return spot_list, backup_list
def add_job(running_jobs, spot_market_list, backup_market, time, job_length, job_id):
global MINUTE_INTERVAL
global JOB_LENGTH_SCALAR
# ============================== Find the best market ==================================
# find the index of trace
trace_index = min(time / MINUTE_INTERVAL, TRACE_LENGTH - 1)
# find feasible markets (Lb > job length)
feasible_market = []
candidate_market_count = 2
for market in spot_market_list:
# if remaining time > job length
if market.Lb[trace_index] * MINUTE_INTERVAL > job_length * JOB_LENGTH_SCALAR:
feasible_market.append(market)
# if no feasible market, exit
if not feasible_market:
logging.error("No feasible market...")
sys.exit(1)
# find the smallest Esb market
feasible_market = sorted(feasible_market, key = lambda market: market.Esb)
chosen_market = feasible_market[randint(0, min(candidate_market_count, len(feasible_market)) - 1)]
# ============================== Find open slot ========================================
open_s_server = None
open_b_server = None
s_slot = None
b_slot = None
# find spot slot
if chosen_market.spot_price[trace_index] > chosen_market.bid:
print ('Price %f too high, job will not run on spot instances...' % chosen_market.spot_price[trace_index])
else:
for AS in chosen_market.active_list:
# if remaining time > job length
if (AS.Lb - (time - AS.created_time)) > job_length * JOB_LENGTH_SCALAR:
s_slot = AS.find_open_slot()
if s_slot:
open_s_server = AS
break
# get new instances
if not s_slot:
print ('No slot available, get more instances...')
open_s_server,s_slot = chosen_market.add_instance(time)
print ('Now, %d free spot instance left...' % len(chosen_market.free_list))
# find backup slot
min_coef = sys.maxint
for AB in backup_market.active_list:
# b_slot = AB.find_open_slot()
# if b_slot:
# open_b_server = AB
# break
slot,coef = AB.find_open_slot_backup(chosen_market.name, chosen_market.bid, running_jobs)
if slot and coef < min_coef:
b_slot = slot
min_coef = coef
open_b_server = AB
# get new instances
if not b_slot:
print ('No backup slot available, get more...')
open_b_server,b_slot = backup_market.add_instance(time)
        print ('Now, %d free backup instance left...' % len(backup_market.free_list))
# ================================ Run job ============================================
if open_s_server:
new_job = Job(open_s_server.ip, open_b_server.ip, s_slot, b_slot, 'avrora:v1', './start-up.sh %d' % job_length,
chosen_market.active_list, job_length, job_id, chosen_market.name, chosen_market.bid)
else:
new_job = Job(None, open_b_server.ip, s_slot, b_slot,'avrora:v1', './start-up.sh %d' % job_length,
chosen_market.active_list, job_length, job_id, chosen_market.name, chosen_market.bid)
new_job.run(start_time=time)
running_jobs.append(new_job)
def write_record(filename, count, cost):
with open(filename, 'a') as f:
f.write('%d %f\n' % (count ,cost))
class Job:
""" A class contain information about a job"""
finished = False
complete_time = 0
def __init__(self, spot_ip, backup_ip, spot_cpuset, backup_cpuset, image, cmd, active_list, job_length, job_id,
market_name, bid):
""" set instance ips and command for the job """
self.spot_ip = spot_ip
self.spot_cpuset = spot_cpuset
self.backup_ip = backup_ip
self.backup_cpuset = backup_cpuset
self.image = image
self.command = cmd
self.active_list = active_list
self.spot_fail = False
self.spot_cid = None
self.backup_cid = None
self.job_length = job_length
self.id = str(job_id)
self.market_name = market_name
self.bid = bid
def run(self, start_time, spot_cpushares = HIGH_WEIGHT, backup_cpushares = LOW_WEIGHT):
""" run a job on both spot and backup instances """
try:
cmd_to_spot = 'run$' + self.image + '|' +self.command + '|' + str(self.spot_cpuset) + '|' + str(spot_cpushares)
cmd_to_backup = 'run$' + self.image + '|' +self.command + '|' + str(self.backup_cpuset) + '|' + str(backup_cpushares)
except:
print (self.image, self.command, self.spot_cpuset, spot_cpushares)
sys.exit(1)
if self.spot_ip:
self.spot_cid = send_to_server(self.spot_ip, cmd_to_spot).strip()
else:
self.spot_fail = True
self.backup_cid = send_to_server(self.backup_ip, cmd_to_backup).strip()
self.start_time = start_time
self.backup_cpushares = backup_cpushares
def set_backup_cpushares(self, cpu_shares):
print('setting cpu shares for %s to %d' % (self.backup_cid, cpu_shares))
cmd = 'setcpu$' + self.backup_cid + '|' + str(cpu_shares)
send_to_server(self.backup_ip, cmd)
self.backup_cpushares = cpu_shares
print ('Done')
def update(self, current_time):
""" check if a job is finished if it finishes on spot server
terminate the copy on backup server and compute the finish time
"""
if not self.spot_fail:
print ('Checking job status on spot server...')
cmd = 'inspect$' + self.spot_cid
res = send_to_server(self.spot_ip, cmd)
try:
stat = json.loads(res)[0]
except:
                print (res)
                print (self.spot_cid)
                print (self.backup_cid)
                print (self.command)
if stat['state']['Status'] == 'exited' and stat['state']['ExitCode'] == 0:
self.finished = True
try:
start = datetime.datetime.strptime(stat['state']['StartedAt'][:26], '%Y-%m-%dT%H:%M:%S.%f')
end = datetime.datetime.strptime(stat['state']['FinishedAt'][:26], '%Y-%m-%dT%H:%M:%S.%f')
except:
start = datetime.datetime.strptime(stat['state']['StartedAt'][:26], '%Y-%m-%dT%H:%M:%S.%fZ')
end = datetime.datetime.strptime(stat['state']['FinishedAt'][:26], '%Y-%m-%dT%H:%M:%S.%fZ')
delta = end - start
self.complete_time = delta.seconds
#self.complete_time = current_time - self.start_time
print ('The job %s/%s is finished on spot. Finished time is %f seconds' % (self.spot_cid, self.backup_cid, self.complete_time))
# terminate the job on backup server
                print ('terminating the copy on backup server...'),
                terminate_cmd = 'stop$' + self.backup_cid
                send_to_server(self.backup_ip, terminate_cmd)
                print ('Done.')
if not isServerAlive(self.spot_ip, self.active_list):
self.spot_fail = True
else:
print ('Spot server fails, checking job status on backup server...')
cmd = 'inspect$' + self.backup_cid
for i in range(5):
try:
res = send_to_server(self.backup_ip, cmd)
stat = json.loads(res)[0]
break
except:
logging.warning(res)
sleep(1)
continue
if stat['state']['Status'] == 'exited' and stat['state']['ExitCode'] == 0:
self.finished = True
try:
start = datetime.datetime.strptime(stat['state']['StartedAt'][:26], '%Y-%m-%dT%H:%M:%S.%f')
end = datetime.datetime.strptime(stat['state']['FinishedAt'][:26], '%Y-%m-%dT%H:%M:%S.%f')
except:
start = datetime.datetime.strptime(stat['state']['StartedAt'][:26], '%Y-%m-%dT%H:%M:%S.%fZ')
end = datetime.datetime.strptime(stat['state']['FinishedAt'][:26], '%Y-%m-%dT%H:%M:%S.%fZ')
delta = end - start
self.complete_time = delta.seconds
# self.complete_time = current_time - self.start_time
                print ('The job %s/%s is finished on backup. Finished time is %f seconds' % (str(self.spot_cid), self.backup_cid, self.complete_time))
elif stat['cpu_shares'] < HIGH_WEIGHT:
self.set_backup_cpushares(HIGH_WEIGHT)
class Instance:
""" A class store information about an EC2 instance """
def __init__(self, ip, is_spot, num_core, Lb = None, Es = None, instance = None, bid = None):
self.ip = ip
self.is_spot = is_spot
self.num_core = num_core
self.Lb = Lb
self.Es = Es
self.instance = instance
self.bid = bid
self.created_time = None
def report(self):
""" report processes info """
print ('IP: %s' % self.ip)
if self.is_spot:
print ('bid: %f' % self.bid)
print ('%15s %10s %8s %7s\n' % ('ID', 'CPU shares', 'CPU sets', 'Status'))
get_stat_cmd = 'stat$'
res = send_to_server(self.ip, get_stat_cmd)
stat = json.loads(res)
for s in stat:
print ('%15s %10s %8s %7s\n' % (s['id'], str(s['cpu_shares']), s['cpuset_cpus'].strip(), s['state']['Status']))
def is_idle(self):
get_stat_cmd = 'stat$'
res = send_to_server(self.ip, get_stat_cmd)
stat = json.loads(res)
if not stat:
return True
else:
return False
def stop_all(self):
""" stop all containers on the server """
# stop all running job on the server
cmd = 'stat$'
res = send_to_server(self.ip, cmd)
stat = json.loads(res)
ids = []
for s in stat:
ids.append(s['id'])
cmd = 'stop$' + '|'.join(ids)
res = send_to_server(self.ip, cmd)
def find_open_slot(self):
""" return a cpuset number if available for a job """
job_count = {}
for i in range(self.num_core):
job_count[i] = 0
get_stat_cmd = 'stat$'
res = send_to_server(self.ip, get_stat_cmd)
try:
stat = json.loads(res)
except:
            print (res)
for s in stat:
# NOTE: cpuset_cpus must state in format 0,1,2,3,4... etc
# The format such as 0-4 is not supported yet
for core in s['cpuset_cpus'].strip().split(','):
if core:
job_count[int(core)] += 1
core_min = None
min_job = sys.maxint # some large number
if self.is_spot:
for c in range(self.num_core):
if (job_count[c] < RHO and job_count[c] < min_job):
core_min = c
min_job = job_count[c]
else:
for c in range(self.num_core):
if (job_count[c] < K * RHO and job_count[c] < min_job):
core_min = c
min_job = job_count[c]
if core_min != None:
return str(core_min)
else:
return None
def find_open_slot_backup(self, market_name, bid, running_jobs):
""" return a cpuset number if available for a job """
job_count = {}
coef_count = {}
for i in range(self.num_core):
job_count[i] = 0
coef_count[i] = 0
get_stat_cmd = 'stat$'
res = send_to_server(self.ip, get_stat_cmd)
try:
stat = json.loads(res)
except:
            print (res)
for s in stat:
# NOTE: cpuset_cpus must state in format 0,1,2,3,4... etc
# The format such as 0-4 is not supported yet
for core in s['cpuset_cpus'].strip().split(','):
if core:
job_count[int(core)] += 1
for j in running_jobs:
if s['id'] == j.backup_cid:
if j.spot_fail:
coef_count[int(core)] += 1
else:
if market_name == j.market_name and bid == j.bid:
coef_count[int(core)] += 1
break
core_min = None
min_coef = sys.maxint # some large number
# if self.is_spot:
# for c in range(self.num_core):
# if (job_count[c] < RHO and job_count[c] < min_job):
# core_min = c
# min_job = job_count[c]
#
# else:
for c in range(self.num_core):
if (job_count[c] < K * RHO and job_count[c] < min_coef):
core_min = c
min_coef = coef_count[c]
if core_min != None and min_coef <= 1:
return str(core_min),min_coef
else:
return None,min_coef
class ec2Market:
def __init__(self, name, instance_type, free_list, bid = None, Lb = None, Esb = None, spot_price = None):
self.name = name
self.instance_type = instance_type
self.free_list = free_list
self.active_list = []
self.bid = bid
self.Lb = Lb
self.Esb = Esb
self.spot_price = spot_price
def add_instance(self, time):
if self.free_list:
open_server = self.free_list.pop()
open_slot = open_server.find_open_slot()
open_server.bid = self.bid
open_server.created_time = time
trace_index = min(time / MINUTE_INTERVAL, TRACE_LENGTH - 1)
if self.Lb:
open_server.Lb = self.Lb[trace_index]
if self.Esb:
open_server.Es = self.Esb[trace_index]
self.active_list.append(open_server)
else:
print ('No free instance available...')
sys.exit(1)
return (open_server, open_slot)
def remove_active(self, time):
""" Use to remove all active instances when spot market fails"""
for server in self.active_list:
server.stop_all()
logging.warning('Spot server on market %s fails at time %d. Because spot price %f is larger than bid %f' %
(self.name, time, self.spot_price[min(time / MINUTE_INTERVAL, TRACE_LENGTH - 1)], self.bid))
server.created_time = None
server.spot_fail = False
server.Lb = None
server.Es = None
server.bid = None
self.free_list.append(server)
del self.active_list[:]
def check_spot_fail(self, time):
if self.spot_price[min(time /MINUTE_INTERVAL, TRACE_LENGTH - 1)] > self.bid:
self.remove_active(time)
def status(self):
print "=========================== %s =================================" % self.name
print "Market type: %s" % self.instance_type
for server in self.active_list:
server.report()
def remove_idles(self):
idle_list = []
for inst in self.active_list:
if inst.is_idle():
logging.warning('Instance %s is idle, removing it...' % inst.ip)
idle_list.append(inst)
for inst in idle_list:
inst.created_time = None
inst.Lb = None
inst.Es = None
inst.bid = None
self.active_list.remove(inst)
self.free_list.append(inst)
if __name__ == '__main__':
# ==================================== Configuration =======================================
spots, backups = get_vm(num_spot=10, num_backup=3)
# get more time for initialization
sleep(10)
free_spot_list = []
free_backup_list = []
for s in spots:
free_spot_list.append(Instance(s.instance.private_dns_name, True, NUM_CORE, instance = s.instance))
for od in backups:
free_backup_list.append(Instance(od.instance.private_dns_name, False, NUM_CORE, instance = od.instance))
# Use for debug
"""
free_spot_list = [Instance('54.209.193.99', True, 4),
Instance('54.175.247.52', True ,4),
Instance('54.174.223.12', True, 4),
Instance('54.175.19.187', True, 4),
Instance('52.91.253.90', True, 4)]
free_backup_list = [Instance('54.209.178.139', False, 4),
Instance('54.88.47.178', False, 4)]
"""
# read spot price trace
with open('zone1/trace_1b.txt') as f:
trace_1b = map(lambda x: float(x), f.readlines())
with open('zone2/trace_1d.txt') as f:
trace_1d = map(lambda x: float(x), f.readlines())
trace_length = min(len(trace_1b), len(trace_1d))
# read workload
with open('workload.txt') as f:
lines = f.readlines()
workload = iter(lines)
job_ids = iter(range(len(lines)))
# read Lb
with open('zone1/lb1_1b.txt') as f:
lb1_1b = map(lambda x: float(x), f.readlines())
with open('zone1/lb5_1b.txt') as f:
lb5_1b = map(lambda x: float(x), f.readlines())
with open('zone2/lb1_1d.txt') as f:
lb1_1d = map(lambda x: float(x), f.readlines())
with open('zone2/lb5_1d.txt') as f:
lb5_1d = map(lambda x: float(x), f.readlines())
# real Esb
with open('zone1/es1_1b.txt') as f:
es1_1b = map(lambda x: float(x), f.readlines())
with open('zone1/es5_1b.txt') as f:
es5_1b = map(lambda x: float(x), f.readlines())
with open('zone2/es1_1d.txt') as f:
es1_1d = map(lambda x: float(x), f.readlines())
with open('zone2/es5_1d.txt') as f:
es5_1d = map(lambda x: float(x), f.readlines())
# create markets
spot_market_list = [ec2Market('us-east-1b', 'spot', free_spot_list, bid=BID, Lb=lb1_1b, Esb=es1_1b, spot_price=trace_1b),
ec2Market('us-east-1b', 'spot', free_spot_list, bid=5*BID, Lb=lb5_1b, Esb=es5_1b, spot_price=trace_1b),
ec2Market('us-east-1d', 'spot', free_spot_list, bid=BID, Lb=lb1_1d, Esb=es1_1d, spot_price=trace_1d),
ec2Market('us-east-1d', 'spot', free_spot_list, bid=5*BID, Lb=lb5_1d, Esb=es5_1d, spot_price=trace_1d)]
backup_market = ec2Market('On-Demand', 'OD', free_backup_list)
# clear previous data
if os.path.isfile('exec_time.txt'):
os.remove('exec_time.txt')
backup_filename = '%s.txt' % backup_market.name
if os.path.isfile(backup_filename):
os.remove(backup_filename)
for market in spot_market_list:
spot_filename = '%s_%f.txt' % (market.name, market.bid)
if os.path.isfile(spot_filename):
os.remove(spot_filename)
# =================================== Running ============================================
running_jobs = []
finished_jobs = []
hasJobs = True
start = datetime.datetime.now()
count = int((datetime.datetime.now() - start).seconds)
try:
next_job = map(lambda x: int(x), workload.next().strip().split())
except StopIteration:
        print ('ERROR: Should have at least one job to start')
sys.exit(1)
while hasJobs:
# next_job[0] is job length and next_job[1] is job arrival time
while next_job and count >= next_job[1]:
# add the job
add_job(running_jobs, spot_market_list, backup_market, count, next_job[0], job_ids.next())
# get the next job
try:
next_job = map(lambda x: int(x), workload.next().strip().split())
except StopIteration:
                print ('No more jobs...')
next_job = None
hasJobs = False
# if count % MINUTE_INTERVAL == 0:
# check spot fail
for market in spot_market_list:
market.check_spot_fail(count)
# check job finish
for job in running_jobs:
job.update(current_time=count)
if job.finished:
finished_jobs.append(job)
with open('exec_time.txt', 'a') as f:
f.write('%s %f %f\n' % (job.id, job.complete_time / MINUTE_INTERVAL, job.job_length))
for job in finished_jobs:
if job in running_jobs:
running_jobs.remove(job)
# record number of servers and cost
for market in spot_market_list:
write_record('%s_%f.txt' % (market.name, market.bid), len(market.active_list),
count)
write_record('%s.txt' % backup_market.name, len(backup_market.active_list),
count)
# close server if no jobs on it
for market in spot_market_list:
market.remove_idles()
backup_market.remove_idles()
# check server status
for market in spot_market_list:
market.status()
backup_market.status()
        print ('Time: %d' % count)
count = int((datetime.datetime.now() - start).seconds)
while running_jobs:
        print ('waiting for all jobs to finish, %d jobs are still running...' % len(running_jobs))
job_buf = []
for job in running_jobs:
job.update(current_time=count)
if job.finished:
finished_jobs.append(job)
with open('exec_time.txt', 'a') as f:
f.write('%s %f %f\n' % (job.id, job.complete_time / MINUTE_INTERVAL, job.job_length))
job_buf.append(job)
for job in job_buf:
running_jobs.remove(job)
# if count % MINUTE_INTERVAL == 0:
# NOTE if the trace is long enough, we will use the trace
# if not, we will use the last spot price available
# if count / MINUTE_INTERVAL < trace_length:
# price_index = count / MINUTE_INTERVAL
# else:
# price_index = trace_length - 1
# record number of servers and cost
for market in spot_market_list:
write_record('%s_%f.txt' % (market.name, market.bid), len(market.active_list),
count)
write_record('%s.txt' % backup_market.name, len(backup_market.active_list),
count)
# close server if no jobs on it
for market in spot_market_list:
market.remove_idles()
backup_market.remove_idles()
# check server status
for market in spot_market_list:
market.status()
backup_market.status()
count = int((datetime.datetime.now() - start).seconds)
for s in spots:
s.terminate_instance()
for od in backups:
od.terminate_instance()
|
ECCEG Server.py
|
from ECC import *
import math
import random
import socket
import threading
import pickle
MAX_CHUNK = 8*1024
sec_rand = random.SystemRandom()
class ECCEG:
def __init__(self):
self.host=socket.gethostbyname(socket.gethostname())
print(self.host)
self.port = 12345
self.server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.server.bind((self.host,self.port))
self.curve = EC(2,3,67)
def keyGenPhase(self):
e1 = Point(2,22,self.curve)
        d = sec_rand.randint(2,self.curve.getModulus())  # use the CSPRNG defined above
e2 = e1.multiply(d)
return [self.curve,e1,e2,d]
def decryptor(self,c1,c2,d):
temp = c1.multiply(d)
tempInv = temp.getInverse()
p = c2.add(tempInv)
return p
def client_thread(self,conn,addr):
keys=self.keyGenPhase()
pvtKey = keys.pop()
pubKey = keys
conn.send(pickle.dumps(pubKey))
cipher=pickle.loads(conn.recv(MAX_CHUNK))
message=self.decryptor(cipher[0],cipher[1],pvtKey)
print(message.getX(),message.getY(),sep=',')
def listen(self):
self.server.listen(5)
while True:
conn,addr = self.server.accept()
print("Connected with ",addr)
try:
t1=threading.Thread(target=self.client_thread,args=(conn,addr))
t1.start()
print("Thread started")
except:
print("Thread did not start")
                self.server.close()
e = ECCEG()
e.listen()
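# Hedged client-side sketch (not part of this file): EC-ElGamal encryption that
# mirrors decryptor() above, using only the Point/EC API already imported here.
# The plaintext point `pm` and the socket plumbing are illustrative assumptions.
#
# curve, e1, e2 = pickle.loads(sock.recv(MAX_CHUNK))   # public key sent by the server
# r = sec_rand.randint(2, curve.getModulus())          # ephemeral secret
# c1 = e1.multiply(r)
# c2 = pm.add(e2.multiply(r))                          # pm is the plaintext point
# sock.send(pickle.dumps([c1, c2]))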
|
tunnel.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=import-error,unused-import
import sys
import ssl
import socket
import time
import traceback
import logging as logs
from contextlib import closing
from datetime import datetime
from threading import Thread
import websocket
from websocket import create_connection, WebSocket
from knack.util import CLIError
from knack.log import get_logger
logger = get_logger(__name__)
class TunnelWebSocket(WebSocket):
def recv_frame(self):
frame = super(TunnelWebSocket, self).recv_frame()
logger.info('Received frame: %s', frame)
return frame
def recv(self):
data = super(TunnelWebSocket, self).recv()
logger.info('Received websocket data: %s', data)
return data
# pylint: disable=no-member,too-many-instance-attributes,bare-except,no-self-use
class TunnelServer(object):
def __init__(self, local_addr, local_port, remote_addr, remote_user_name, remote_password):
self.local_addr = local_addr
self.local_port = local_port
if self.local_port != 0 and not self.is_port_open():
raise CLIError('Defined port is currently unavailable')
self.remote_addr = remote_addr
self.remote_user_name = remote_user_name
self.remote_password = remote_password
self.client = None
self.ws = None
logger.info('Creating a socket on port: %s', self.local_port)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logger.info('Setting socket options')
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
logger.info('Binding to socket on local address and port')
self.sock.bind((self.local_addr, self.local_port))
if self.local_port == 0:
self.local_port = self.sock.getsockname()[1]
logger.warning('Auto-selecting port: %s', self.local_port)
logger.info('Finished initialization')
def create_basic_auth(self):
from base64 import b64encode
basic_auth_string = '{}:{}'.format(self.remote_user_name, self.remote_password).encode()
basic_auth_string = b64encode(basic_auth_string).decode('utf-8')
return basic_auth_string
def is_port_open(self):
is_port_open = False
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
if sock.connect_ex(('', self.local_port)) == 0:
logger.info('Port %s is NOT open', self.local_port)
else:
logger.warning('Port %s is open', self.local_port)
is_port_open = True
return is_port_open
def is_port_set_to_default(self):
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(self.remote_user_name, self.remote_password))
url = 'https://{}{}'.format(self.remote_addr, '.scm.azurewebsites.net/AppServiceTunnel/Tunnel.ashx?GetStatus')
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
msg = r.read().decode('utf-8')
logger.info('Status response message: %s', msg)
if 'FAIL' in msg.upper():
            logger.warning('WARNING - Remote debugging may not be set up properly. Response content: %s', msg)
if '2222' in msg:
return True
return False
def _listen(self):
self.sock.listen(100)
index = 0
basic_auth_string = self.create_basic_auth()
while True:
self.client, _address = self.sock.accept()
self.client.settimeout(1800)
host = 'wss://{}{}'.format(self.remote_addr, '.scm.azurewebsites.net/AppServiceTunnel/Tunnel.ashx')
basic_auth_header = 'Authorization: Basic {}'.format(basic_auth_string)
cli_logger = get_logger() # get CLI logger which has the level set through command lines
is_verbose = any(handler.level <= logs.INFO for handler in cli_logger.handlers)
if is_verbose:
logger.info('Websocket tracing enabled')
websocket.enableTrace(True)
else:
logger.warning('Websocket tracing disabled, use --verbose flag to enable')
websocket.enableTrace(False)
self.ws = create_connection(host,
sockopt=((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),),
class_=TunnelWebSocket,
header=[basic_auth_header],
sslopt={'cert_reqs': ssl.CERT_NONE},
enable_multithread=True)
logger.info('Websocket, connected status: %s', self.ws.connected)
index = index + 1
logger.info('Got debugger connection... index: %s', index)
debugger_thread = Thread(target=self._listen_to_client, args=(self.client, self.ws, index))
web_socket_thread = Thread(target=self._listen_to_web_socket, args=(self.client, self.ws, index))
debugger_thread.start()
web_socket_thread.start()
logger.info('Both debugger and websocket threads started...')
logger.warning('Successfully connected to local server..')
debugger_thread.join()
web_socket_thread.join()
logger.info('Both debugger and websocket threads stopped...')
logger.warning('Stopped local server..')
def _listen_to_web_socket(self, client, ws_socket, index):
while True:
try:
logger.info('Waiting for websocket data, connection status: %s, index: %s', ws_socket.connected, index)
data = ws_socket.recv()
logger.info('Received websocket data: %s, index: %s', data, index)
if data:
                    # Set the response to echo back the received data
response = data
logger.info('Sending to debugger, response: %s, index: %s', response, index)
client.sendall(response)
logger.info('Done sending to debugger, index: %s', index)
else:
logger.info('Client disconnected!, index: %s', index)
client.close()
ws_socket.close()
break
except:
traceback.print_exc(file=sys.stdout)
client.close()
ws_socket.close()
return False
def _listen_to_client(self, client, ws_socket, index):
while True:
try:
logger.info('Waiting for debugger data, index: %s', index)
buf = bytearray(4096)
nbytes = client.recv_into(buf, 4096)
logger.info('Received debugger data, nbytes: %s, index: %s', nbytes, index)
if nbytes > 0:
responseData = buf[0:nbytes]
logger.info('Sending to websocket, response data: %s, index: %s', responseData, index)
ws_socket.send_binary(responseData)
logger.info('Done sending to websocket, index: %s', index)
else:
logger.warning('Client disconnected %s', index)
client.close()
ws_socket.close()
break
except:
traceback.print_exc(file=sys.stdout)
client.close()
ws_socket.close()
return False
def start_server(self):
logger.warning('Start your favorite client and connect to port %s', self.local_port)
self._listen()
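# Hedged usage sketch (not part of this module): the argument values below are
# illustrative only; remote_addr is the app name, to which the code above
# appends '.scm.azurewebsites.net'.
#
# server = TunnelServer('127.0.0.1', 0, 'my-webapp', 'user', 'password')
# Thread(target=server.start_server, daemon=True).start()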
|
master.py
|
import sys
import socket
import threading
import logging
import json
import time
import random
import os
# Docker requires loopback address to be 0.0.0.0 instead of localhost.
# 'localhost' is chosen if run manually without docker.
JOB_REQUESTS_HOST = os.getenv("LOOPBACK_ADDRESS", "localhost")
JOB_REQUESTS_PORT = 5000
WORKER_RESPONSES_HOST = os.getenv("LOOPBACK_ADDRESS", "localhost")
WORKER_RESPONSES_PORT = 5001
WORKER_ACCEPT_JOBS_HOST = os.getenv("LOOPBACK_ADDRESS", "localhost")
ALL_MAPPERS_COMPLETED_CODE = -1
thread_lock = threading.Lock()
random.seed(3)
def read_args():
if len(sys.argv) != 3:
print("Usage: python master.py /path/to/config <scheduling-algorithm>")
exit(1)
config_file = sys.argv[1]
scheduling_algo = sys.argv[2]
with open(config_file, "r") as f:
config = json.loads(f.read())
return config, scheduling_algo
def init_logging(scheduling_algo):
logging.basicConfig(
filename=f"../logs/master_{scheduling_algo}.log",
filemode="w",
level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(message)s",
)
logging.disable(logging.DEBUG)
def preprocess_workers(workers):
for worker in workers:
worker["free_slots"] = worker["slots"]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((WORKER_ACCEPT_JOBS_HOST, int(worker["port"])))
s.listen(50)
worker["socket"] = s
workers_dict = {}
for worker in workers:
workers_dict[worker["worker_id"]] = worker
return workers_dict
def send_task_to_worker(worker, job_id, task):
worker_socket = worker["socket"]
c, addr = worker_socket.accept()
c.settimeout(5)
task_json = {
"job_id": job_id,
"task_id": task["task_id"],
"duration": task["duration"],
}
c.send(json.dumps(task_json).encode())
c.close()
logging.info(f"started task {task['task_id']} of job {job_id} on worker {worker['worker_id']}")
def listen_for_jobs(workers, scheduling_algo, jobs):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as job_request_socket:
job_request_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
job_request_socket.bind((JOB_REQUESTS_HOST, JOB_REQUESTS_PORT))
job_request_socket.listen(50)
selected_worker_index = 0
all_worker_ids = list(workers.keys())
while True:
client_socket, address = job_request_socket.accept()
client_socket.settimeout(5)
job_request = json.loads(client_socket.recv(2048).decode())
job_request["unfinished_map_tasks"] = len(job_request["map_tasks"])
jobs[job_request["job_id"]] = job_request
logging.info(f"started job {job_request['job_id']}")
for task in job_request["map_tasks"]:
assigned = False
while not assigned:
thread_lock.acquire()
if scheduling_algo == "RANDOM":
selected_worker_id = random.randint(1, len(workers))
elif scheduling_algo == "RR":
selected_worker_id = all_worker_ids[selected_worker_index]
elif scheduling_algo == "LL":
selected_worker_id = max(workers, key=lambda worker: workers[worker]["free_slots"])
if workers[selected_worker_id]["free_slots"] > 0:
send_task_to_worker(workers[selected_worker_id], job_request["job_id"], task)
workers[selected_worker_id]["free_slots"] -= 1
logging.debug(
f'worker {selected_worker_id} has {workers[selected_worker_id]["free_slots"]} free slots'
)
thread_lock.release()
assigned = True
else:
thread_lock.release()
if scheduling_algo == "LL":
logging.debug(f"all workers have filled slots")
time.sleep(1)
else:
logging.debug(f"all slots of worker {selected_worker_id} are full")
time.sleep(0.1)
selected_worker_index = (selected_worker_index + 1) % len(workers)
client_socket.close()
def finish_task_from_worker(workers, server_worker_socket, jobs):
client_socket, address = server_worker_socket.accept()
client_socket.settimeout(5)
completed_task = json.loads(client_socket.recv(2048).decode())
logging.info(
f"task {completed_task['task_id']} of job {completed_task['job_id']} on worker {completed_task['worker_id']} has finished executing"
)
thread_lock.acquire()
workers[completed_task["worker_id"]]["free_slots"] += 1
logging.debug(
f'worker {completed_task["worker_id"]} has {workers[completed_task["worker_id"]]["free_slots"]} free slots'
)
thread_lock.release()
if "M" in completed_task["task_id"]:
jobs[completed_task["job_id"]]["unfinished_map_tasks"] -= 1
logging.debug(
f"job {completed_task['job_id']} has {jobs[completed_task['job_id']]['unfinished_map_tasks']} remaining map tasks"
)
client_socket.close()
def listen_to_workers(workers, scheduling_algo, jobs):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_worker_socket:
server_worker_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_worker_socket.bind((WORKER_RESPONSES_HOST, WORKER_RESPONSES_PORT))
server_worker_socket.listen(50)
selected_worker_index = 0
all_worker_ids = list(workers.keys())
while True:
finish_task_from_worker(workers, server_worker_socket, jobs)
for job_id in list(jobs.keys()):
if jobs[job_id]["unfinished_map_tasks"] == 0:
for task in jobs[job_id]["reduce_tasks"]:
assigned = False
while not assigned:
thread_lock.acquire()
if scheduling_algo == "RANDOM":
selected_worker_id = random.randint(1, len(workers))
elif scheduling_algo == "RR":
selected_worker_id = all_worker_ids[selected_worker_index]
elif scheduling_algo == "LL":
selected_worker_id = max(workers, key=lambda worker: workers[worker]["free_slots"])
if workers[selected_worker_id]["free_slots"] > 0:
send_task_to_worker(workers[selected_worker_id], job_id, task)
workers[selected_worker_id]["free_slots"] -= 1
logging.debug(
f'worker {selected_worker_id} has {workers[selected_worker_id]["free_slots"]} free slots'
)
thread_lock.release()
assigned = True
else:
thread_lock.release()
if scheduling_algo == "LL":
logging.debug(f"all workers have filled slots")
time.sleep(1)
else:
logging.debug(f"all slots of worker {selected_worker_id} are full")
time.sleep(0.1)
finish_task_from_worker(workers, server_worker_socket, jobs)
selected_worker_index = (selected_worker_index + 1) % len(workers)
jobs[job_id]["unfinished_map_tasks"] = ALL_MAPPERS_COMPLETED_CODE
def main():
config, scheduling_algo = read_args()
init_logging(scheduling_algo)
workers = preprocess_workers(config["workers"])
jobs = {}
job_listen_thread = threading.Thread(target=listen_for_jobs, args=[workers, scheduling_algo, jobs])
job_listen_thread.start()
worker_listen_thread = threading.Thread(target=listen_to_workers, args=[workers, scheduling_algo, jobs])
worker_listen_thread.start()
if __name__ == "__main__":
main()
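# Hedged sketch of the config layout this script expects (field names taken from
# preprocess_workers above; the concrete values are illustrative assumptions):
#
# {
#     "workers": [
#         {"worker_id": 1, "port": 4000, "slots": 5},
#         {"worker_id": 2, "port": 4001, "slots": 5}
#     ]
# }
#
# Run as: python master.py /path/to/config LL   (or RR / RANDOM)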
|
alexa_audio_bt.py
|
#!/usr/bin/env python3
import bluetooth
import logging
import time
import threading
import struct
import sys
import math
# from specs
SOL_BLUETOOTH = 274
SOL_SCO = 17
BT_VOICE = 11
BT_VOICE_TRANSPARENT = 0x0003
BT_VOICE_CVSD_16BIT = 0x0060
SCO_OPTIONS = 1
L2CAP_UUID = "0100"
SCO_HEADERS_SIZE = 16
class BluetoothAudio:
""" This object connect to Bluetooth handset/nandsfree device
stream audio from microphone and to speaker.
"""
HFP_TIMEOUT = 1.0
HFP_CONNECT_AUDIO_TIMEOUT = 10.0
AUDIO_8KHZ_SIGNED_16BIT_LE_MONO = 0
AUDIO_16KHZ_SIGNED_16BIT_LE_MONO = 1
CAPTURE_BUFFER_MAX_SIZE = 16777216 # 16 Mb
def __init__(self, addr, format = AUDIO_8KHZ_SIGNED_16BIT_LE_MONO):
""" Create object which connects to bluetooth device in the background.
Class automatically reconnects to the device in case of any errors.
:param addr: MAC address of Bluetooth device, string.
"""
self.audio = None
self.hfp = None
self.addr = addr
self.resample = (format == self.AUDIO_16KHZ_SIGNED_16BIT_LE_MONO)
self.wlt = threading.Thread(target=self._worker_loop)
self.wlt.start()
self.buf = bytes()
self.rlt = None
self.rltl = threading.Lock()
def _read_loop(self):
logging.info('Read loop start')
self.buf = bytes()
while self.rlt:
try:
data_raw = self.audio.recv(self.sco_payload)
except bluetooth.btcommon.BluetoothError:
data_raw = None
if not data_raw or len(data_raw) == 0:
self.audio.close()
self.audio = None
logging.warning('Capture audio failed')
break
if self.resample:
# convert data
data = bytes()
prev = 0
for i in range(0, len(data_raw), 2):
# convert from 8 kHz signed 16 bit to 16 kHz signed 16 bit le
v = struct.unpack_from('<h', data_raw, i)[0]
data += struct.pack('<hh', v, v)
else:
data = data_raw
self.rltl.acquire(True)
if len(self.buf) > self.CAPTURE_BUFFER_MAX_SIZE:
logging.warning('Capture buffer overflow')
self.buf = bytes()
self.buf += data
self.rltl.release()
logging.info('Read loop stop')
def _worker_loop(self):
logging.info('HFPDevice class is initialised, using ' + self.addr)
while self.wlt:
self._find_channel()
if not self.channel:
time.sleep(self.HFP_TIMEOUT)
continue
logging.info('HSP/HFP found on RFCOMM channel ' + str(self.channel))
self._connect_service_level()
if not self.hfp:
time.sleep(self.HFP_TIMEOUT)
continue
try:
self._parse_channel()
except bluetooth.btcommon.BluetoothError as e:
logging.warning('Service level connection disconnected: ' + str(e))
time.sleep(self.HFP_TIMEOUT)
self._cleanup()
def _parse_channel(self):
audio_time = time.time() + self.HFP_CONNECT_AUDIO_TIMEOUT
        service_notice = True
while self.wlt:
data = self._read_at()
if data:
if b'AT+BRSF=' in data:
self._send_at(b'+BRSF: 0')
self._send_ok()
elif b'AT+CIND=?\r' == data:
self._send_at(b'+CIND: ("service",(0,1)),("call",(0,1))')
self._send_ok()
elif b'AT+CIND?\r' == data:
self._send_at(b'+CIND: 1,0')
self._send_ok()
elif b'AT+CMER=' in data:
self._send_ok()
# after this command we can establish audio connection
                    service_notice = False
self._connect_audio()
elif b'AT+CHLD=?\r' == data:
self._send_at(b'+CHLD: 0')
self._send_ok()
else:
self._send_error()
# if we don't get service level connection, try audio anyway
if not self.audio:
if audio_time < time.time():
                    if service_notice:
                        logging.warning('Service connection timed out, try audio anyway...')
                        service_notice = False
self._connect_audio()
def _connect_service_level(self):
hfp = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
try:
hfp.connect((self.addr, self.channel))
except bluetooth.btcommon.BluetoothError as e:
hfp.close()
logging.warning('Failed to establish service level connection: ' + str(e))
return
hfp.settimeout(self.HFP_TIMEOUT)
logging.info('HSP/HFP service level connection is established')
self.hfp = hfp
def _connect_audio(self):
audio = bluetooth.BluetoothSocket(bluetooth.SCO)
# socket config
opt = struct.pack ("H", BT_VOICE_CVSD_16BIT)
audio.setsockopt(SOL_BLUETOOTH, BT_VOICE, opt)
try:
audio.connect((self.addr,))
except bluetooth.btcommon.BluetoothError as e:
audio.close()
logging.info('Failed to establish audio connection: ' + str(e))
return
opt = audio.getsockopt(SOL_SCO, SCO_OPTIONS, 2)
mtu = struct.unpack('H', opt)[0]
self.audio = audio
self.sco_payload = mtu - SCO_HEADERS_SIZE
self.rlt = threading.Thread(target=self._read_loop)
self.rlt.start()
logging.info('Audio connection is established, mtu = ' + str(mtu))
def _find_channel(self):
        # Discover the RFCOMM channel, preferring HFP.
hsp_channel = None
generic_channel = None
services = bluetooth.find_service(address=self.addr, uuid=L2CAP_UUID)
for svc in services:
for c in svc["service-classes"]:
service_class = c.lower()
if bluetooth.HANDSFREE_CLASS.lower() == service_class:
self.channel = int(svc["port"])
return
elif bluetooth.HEADSET_CLASS.lower() == service_class:
hsp_channel = int(svc["port"])
elif bluetooth.GENERIC_AUDIO_CLASS.lower() == service_class:
generic_channel = int(svc["port"])
if hsp_channel:
self.channel = hsp_channel
else:
self.channel = generic_channel
def _read_at(self):
try:
d = self.hfp.recv(1024)
logging.debug('> ' + d.decode('utf8'))
return d
except bluetooth.btcommon.BluetoothError as e:
if str(e) != 'timed out':
raise
return None
def _send(self, data):
logging.debug('< ' + data.decode('utf8').replace('\r\n', ''))
self.hfp.send(data)
def _send_at(self, data):
self._send(b'\r\n' + data + b'\r\n')
def _send_ok(self):
self._send_at(b'OK')
def _send_error(self):
self._send_at(b'ERROR')
def _cleanup(self):
if self.rlt:
rlt = self.rlt
self.rlt = None
rlt.join()
if self.audio:
self.audio.close()
if self.hfp:
self.hfp.close()
self.hfp = None
self.audio = None
def close(self):
wlt = self.wlt
self.wlt = None
wlt.join()
self._cleanup()
def is_connected(self):
""" Check if headset/handfree device is connected.
:return: True if connected, False otherwise.
"""
return (self.audio != None)
def flush(self):
""" Clean up capture buffer
"""
self.rltl.acquire(True)
self.buf = bytes()
self.rltl.release()
def read(self, length = None):
""" Receive audio from bluetooth device. Block until read something.
:param length: number of bytes(not samples) to read, not more then CAPTURE_BUFFER_MAX_SIZE.
If None or 0, all aviliable will be read
:return: Array with audio data(16 kHz signed 16 bit little endian mono data) or None on error.
"""
if not self.rlt:
return None
if length == None:
length = 0
while len(self.buf) == 0 or len(self.buf) < length:
if not self.rlt:
return None
            if not self.rlt.is_alive():
return None
s = (length - len(self.buf)) / 8000
if s <= 0:
self.rlt.join(0.001)
else:
if self.resample:
s = s / 2
self.rlt.join(s)
self.rltl.acquire(True)
if length == 0:
data = self.buf
self.buf = bytes()
else:
data = self.buf[0:length]
self.buf =self.buf[length:]
self.rltl.release()
return data
def write(self, data):
""" Send audio data to bluetooth device. Blocking.
:param data: array with audio data.
:param format: audio fromat, for example AUDIO_8KHZ_SIGNED_8BIT_MONO or AUDIO_16KHZ_SIGNED_16BIT_LE_MONO.
:return: True on success, False on error.
"""
if not self.audio:
return False
try:
if self.resample:
# convert data
data_raw = bytes()
for i in range(0, len(data), 4):
val1, val2 = struct.unpack_from('<hh', data, i) # two samples of signed 16 bit le
val = round((val1 + val2) / 2) # downsample to 8 kHz
data_raw += struct.pack('<h', val)
else:
data_raw = data
sent = 0
while sent < len(data_raw):
ts = data_raw[sent:(sent+int(self.sco_payload))]
if len(ts) < self.sco_payload:
ts += bytes([0] * (self.sco_payload - len(ts)))
sent += self.audio.send(ts)
return True
except bluetooth.btcommon.BluetoothError:
return False
except AttributeError: # 'NoneType' object has no attribute 'send'
return False
def beep(self, length_ms = 300, frequency = 1000.0, amplitude = 0.5):
""" Make a beep sound with specified parameters
:return: True on success, False on error.
"""
fps = 8000
if self.resample:
fps = 16000
logging.info('Beep {} Hz, {} ms'.format(frequency, length_ms))
period = int(fps / frequency)
length = int(fps * length_ms / 1000)
snd = bytes()
for i in range(0, length):
val = 32767.0 * amplitude * math.sin(2.0 * math.pi * float(i % period) / period)
snd += struct.pack('<h', int(val))
return self.write(snd)
def demo_ring(hf):
time.sleep(1)
hf._send_at(b'RING')
def main():
""" Sample of usage BluetoothAudio.
This sample loopback audio from microphone to speaker.
"""
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
if len(sys.argv) == 1:
print("Please specify device MAC address or 'scan' to scan it.")
sys.exit(1)
if sys.argv[1] == 'scan':
nearby_devices = bluetooth.discover_devices(duration=4,lookup_names=True,
flush_cache=True, lookup_class=False)
print(nearby_devices)
return
if not bluetooth.is_valid_address(sys.argv[1]):
print("Wrong device address.")
return
hf = BluetoothAudio(sys.argv[1], BluetoothAudio.AUDIO_16KHZ_SIGNED_16BIT_LE_MONO)
# Make a test RING from headset
#threading.Thread(target=demo_ring, args=[hf]).start()
try:
while not hf.is_connected():
time.sleep(0.1)
time.sleep(1.5)
hf.beep()
time.sleep(0.5)
hf.flush()
while True:
d = hf.read()
if d:
hf.write(d)
else:
time.sleep(0.5)
# generate noise
#hf.write(bytes(i for i in range(hf.sco_payload)))
except KeyboardInterrupt:
pass
hf.close()
logging.info('\nExiting...')
if __name__ == '__main__':
main()
|
process_handler.py
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import io
import multiprocessing
import subprocess
import sys
from abc import ABC, abstractmethod
from typing import Optional
class ProcessHandler(ABC):
"""An abstraction of process handling calls using the same interface as subprocess.Popen.
See SubprocessProcessHandler below for an example.
"""
@abstractmethod
def wait(self, timeout: Optional[float] = None) -> int:
"""Wait for the underlying process to terminate.
:param timeout: The time to wait for the process to terminate in fractional seconds. Wait
forever by default.
:returns: The process exit code if it has terminated.
:raises: :class:`subprocess.TimeoutExpired`
"""
@abstractmethod
def kill(self) -> None:
pass
@abstractmethod
def terminate(self) -> None:
pass
@abstractmethod
def poll(self) -> Optional[int]:
pass
class SubprocessProcessHandler(ProcessHandler):
"""A `ProcessHandler` that delegates directly to a subprocess.Popen object."""
def __init__(self, process: subprocess.Popen) -> None:
self._process = process
def wait(self, timeout: Optional[float] = None) -> int:
return self._process.wait(timeout=timeout)
def kill(self) -> None:
self._process.kill()
def terminate(self) -> None:
self._process.terminate()
def poll(self) -> Optional[int]:
return self._process.poll()
def communicate_teeing_stdout_and_stderr(self, stdin=None):
"""Just like subprocess.communicate, but tees stdout and stderr to both sys.std{out,err} and
a buffer. Only operates on stdout/stderr if the Popen call sent them to subprocess.PIPE.
:param stdin: A string to send to the stdin of the subprocess.
:return: (stdout, stderr) as strings.
"""
if stdin is not None and self._process.stdin is not None:
self._process.stdin.write(stdin)
def fork_tee(infile, outfile):
if infile is None:
return lambda: None
queue = multiprocessing.Queue()
process = multiprocessing.Process(target=_tee, args=(infile, outfile, queue.put))
process.start()
def join_and_get_output():
process.join()
return queue.get()
return join_and_get_output
stdout = fork_tee(self._process.stdout, sys.stdout)
stderr = fork_tee(self._process.stderr, sys.stderr)
self._process.wait()
return stdout(), stderr()
def _tee(infile, outfile, return_function):
accumulator = io.BytesIO()
for line in iter(infile.readline, b""):
accumulator.write(line)
outfile.buffer.write(line)
infile.close()
return_function(accumulator.getvalue())
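# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal, hedged example of communicate_teeing_stdout_and_stderr: the child's output is
# streamed to this process's sys.stdout/sys.stderr while also being captured and returned.
# The child command below is a placeholder chosen only for illustration.
def _example_tee_usage():
    process = subprocess.Popen(
        [sys.executable, "-c", "print('hello from child')"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    handler = SubprocessProcessHandler(process)
    stdout, stderr = handler.communicate_teeing_stdout_and_stderr()
    return stdout, stderr  # captured bytes, already echoed to the parent's streams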
|
rank.py
|
from typing import List, Dict, Union, Optional, Tuple, Any, Callable, ClassVar
import numpy
import threading
from loguru import logger
import asyncio
import functools
import aiohttp
import asyncio
import pickle
import requests
def server_wrapper(func):
"""Checks for a server at http://localhost:22647/. If there isn't one, it sets server_is_running to False, and it returns the function as per normal. Otherwise, it returns a function that takes the **kwargs and makes a post request to the server using the kwargs, and a field called 'method' with the name of the function. If that fails, it calls the function as per normal."""
async def interceptor_inner_wrapper(self, **kwargs):
if self.is_server:
self.server_is_running = False
return await func(self, **kwargs)
if self.server_is_running == False:
return await func(self, **kwargs)
if self.server_is_running == None:
try:
requests.post("http://localhost:22647/")
self.server_is_running = True
pass
except requests.exceptions.ConnectionError:
self.server_is_running = False
return await func(self, **kwargs)
async with aiohttp.ClientSession() as session:
body = {"method": func.__name__, **kwargs}
async with session.post("http://localhost:22647/", json=body) as resp:
r = await resp.read()
try:
obj = pickle.loads(r)
except pickle.UnpicklingError:
self.server_is_running = False
return await func(self, **kwargs)
return obj
interceptor_inner_wrapper.__doc__ = func.__doc__
interceptor_inner_wrapper.__annotations__ = func.__annotations__
interceptor_inner_wrapper.__name__ = func.__name__
return interceptor_inner_wrapper
class Ranker:
"""
This class provides a high-level interface for ranking sentences by their semantic similarity.
It underpins a lot of this library's functionality. The problem is, when you load it for the
first time, it will download a 450M language model to your drive, and every time you want to
reload your chatbot it will take around 11-12 seconds just to load the model. So you may want
to start a server in the background via from acrossword import run; run()."""
from sentence_transformers import SentenceTransformer, models
from torch import Tensor
# Imports occur inside the class because loading these libraries is extremely slow and this prevents quick iterative testing. Is this a good rationale? I don't know, I just find it annoying so you'll have to cope.
__text_embeddings_cache__: Dict[str, Dict[str, Tensor]] = dict()
__models_cache__: Dict[str, SentenceTransformer] = dict()
server_is_running = None
is_loading_model = False
def __init__(
self, # model_locations: List[str] = list(),
default_model: str = "all-mpnet-base-v2",
is_server: bool = False,
) -> None:
self.default_model = default_model
self.is_server = is_server
# for model_name in model_locations:
# threading.Thread(
# target=self._load_model, args=[model_name]
# ).start()
# self._load_model(model_name)
@server_wrapper
async def is_empty(self) -> bool:
return len(self.__models_cache__) == 0
def _download_model(self, model_name: str) -> None:
logger.debug(f"Downloading model {model_name}")
model_name = self._split_at_last_slash(model_name)
if model_name in self.__models_cache__:
return
model = self.SentenceTransformer(model_name)
logger.debug(f"Downloaded model {model_name}")
model.max_seq_length = 512
self.__models_cache__[model_name] = model
self.__text_embeddings_cache__[model_name] = dict()
self.is_loading_model = False
def _load_from_file(self, model_name: str) -> None:
word_embedding_model = self.models.Transformer(model_name)
pooling_model = self.models.Pooling(
word_embedding_model.get_word_embedding_dimension(), pooling_mode="mean"
)
model = self.SentenceTransformer(modules=[word_embedding_model, pooling_model])
model.max_seq_length = 512
self.__models_cache__[model_name] = model
self.__text_embeddings_cache__[model_name] = dict()
self.is_loading_model = False
@server_wrapper
async def add_model(self, model_name: str, from_file: bool = False) -> None:
# if from_file:
# self._load_from_file(model_name)
# else:
# self._download_model(model_name)
self.is_loading_model = True
if from_file:
threading.Thread(target=self._load_from_file, args=[model_name]).start()
else:
threading.Thread(target=self._download_model, args=[model_name]).start()
def _split_at_last_slash(self, model_name: str) -> str:
if "/" in model_name:
model_name = model_name.split("/")[-1]
return model_name
@server_wrapper
async def convert(
self, model_name: str, sentences: Union[List[str], Tuple[str, ...]]
) -> List[Tensor]:
not_in_cache = dict()
try:
for i, sentence in enumerate(sentences):
if sentence not in self.__text_embeddings_cache__[model_name]:
not_in_cache[i] = sentence
except KeyError:
if not self.is_loading_model:
await self.add_model(model_name=model_name)
while model_name not in self.__models_cache__ and self.is_loading_model:
await asyncio.sleep(0.1)
return await self.convert(model_name=model_name, sentences=sentences)
if not_in_cache:
embeddings = await self._convert(
model_name=model_name, sentences=tuple(not_in_cache.values())
)
for i, embedding in enumerate(embeddings):
position = list(not_in_cache.keys())[i]
self.__text_embeddings_cache__[model_name][
not_in_cache[position]
] = embedding
return [
self.__text_embeddings_cache__[model_name][sentence]
for sentence in sentences
]
async def _convert(
self, model_name: str, sentences: Tuple[str, ...]
) -> Union[List[Tensor], numpy.ndarray, Tensor]:
model = self.__models_cache__[model_name]
current_loop = asyncio.get_running_loop()
embeddings = await current_loop.run_in_executor(
None,
functools.partial(
model.encode,
list(sentences),
convert_to_tensor=True,
batch_size=5,
show_progress_bar=True,
normalize_embeddings=True,
),
)
for i, embedding in enumerate(embeddings):
self.__text_embeddings_cache__[model_name][sentences[i]] = embedding
return embeddings
@server_wrapper
async def rank(
self,
texts: Tuple[str, ...],
query: str,
top_k: int,
model: str,
threshold: float = 0.1,
return_none_if_below_threshold: bool = False,
) -> List[str]:
import numpy
if isinstance(texts, list):
texts = tuple(texts)
text_embeddings, query_embedding = await self.convert(
model_name=model, sentences=texts
), await self.convert(model_name=model, sentences=tuple([query]))
return await self._rank(
texts,
text_embeddings,
query_embedding[0],
top_k,
threshold,
return_none_if_below_threshold,
)
@server_wrapper
async def weighted_rank(
self,
texts: Tuple[str, ...],
queries: Tuple[str, ...],
weights: Tuple[float, ...],
top_k: int,
model: str,
threshold: float = 0.1,
return_none_if_below_threshold: bool = False,
) -> List[str]:
import numpy
if isinstance(texts, list):
texts = tuple(texts)
text_embeddings, query_embeddings = await self.convert(
model_name=model, sentences=texts
), await self.convert(model_name=model, sentences=queries)
weighted_average_query = numpy.average(
query_embeddings, axis=0, weights=weights
)
return await self._rank(
texts,
text_embeddings,
weighted_average_query,
top_k,
threshold,
return_none_if_below_threshold,
)
async def _rank(
self,
texts: Tuple[str, ...],
text_embeddings: List[Tensor],
query_embedding: Tensor,
top_k: int,
threshold: float,
return_none_if_below_threshold: bool,
) -> List[str]:
results = list()
# logger.debug(f"Query embedding shape: {query_embedding.shape}")
# logger.debug(f"Query embeddings: {query_embedding}")
for i, tensor_embedding in enumerate(text_embeddings):
similarity_tuple = (
texts[i],
numpy.dot(query_embedding.cpu(), tensor_embedding.cpu()),
)
if (
similarity_tuple[1] >= threshold and return_none_if_below_threshold
) or not return_none_if_below_threshold:
results.append(similarity_tuple)
results.sort(key=lambda similarity_tuple: similarity_tuple[1], reverse=True)
return [result[0] for result in results][:top_k]
# Note that the above takes a very long time to load, so often users will have a localhost:22647 server with an instance of Ranker already running, accessible via json requests in the form:
# async with aiohttp.ClientSession() as session:
#     async with session.post("http://localhost:22647/", json={"method": "convert", "sentences": ["cats", "stones", "Jupiter"], "model_name": "all-mpnet-base-v2"}) as resp:
# r = await resp.read()
# obj = pickle.loads(r)
# So this module also creates a class decorator that sneakily intercepts all of its methods, checking for the existence of a localhost:22647 server, and if it exists, it uses it to do the work.
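# --- Usage sketch (illustrative addition; assumes an asyncio event loop and the default model) ---
# A minimal example of ranking a few candidate sentences against a query with Ranker.rank.
async def _example_rank_usage():
    ranker = Ranker()
    results = await ranker.rank(
        texts=("cats purr", "stones are heavy", "Jupiter is a planet"),
        query="which one is about an animal?",
        top_k=2,
        model="all-mpnet-base-v2",
    )
    return results  # the two sentences most similar to the query, best first
# e.g. asyncio.run(_example_rank_usage())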
|
selftest.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
import logging
import os
from threading import Thread
import time
from collections import defaultdict
import pkg_resources
from requests.exceptions import ConnectionError, SSLError
from resilient import constants as res_constants
from resilient import BasicHTTPException, SimpleHTTPException, SimpleClient, is_env_proxies_set, get_and_parse_proxy_env_var
from resilient_circuits.actions_component import SELFTEST_ERRORS, SELFTEST_SUBSCRIPTIONS
from resilient_circuits.stomp_events import SelftestTerminateEvent
from resilient_circuits import constants, helpers, app
# Get the same logger object that is used for resilient_circuits commands
LOG = logging.getLogger(constants.CMDS_LOGGER_NAME)
ERROR_EXIT_CODES_MAP = {
1: 1, # Error running App's selftest
20: 20, # REST: Generic connection error
401: 21, # REST: Connection unauthorized
22: 22, # REST: OSError (Could not find Certificate file)
23: 23, # REST: SSL Error (Invalid Certificate Error)
24: 24, # REST: Organization Membership Error
25: 25, # REST: Invalid Username or Password
30: 30, # STOMP: Generic connection error
31: 31, # STOMP: Not authorized to instantiate STOMP connection
32: 32, # STOMP: Not authorized to read from queue
33: 33 # STOMP: Timed out trying to see if resilient-circuits is subscribed to a message destination
}
def error_connecting_to_soar(host, reason="Unknown", status_code=20):
"""
Logs the host, reason and error code why it cannot connect to SOAR
and exits with the error code defined in ERROR_EXIT_CODES_MAP
"""
# Remove resilient-circuits logging handler
LOG.parent.handlers = []
LOG.info("\nERROR: could not connect to SOAR at '{0}'.\nReason: {1}\nError Code: {2}".format(host, reason, ERROR_EXIT_CODES_MAP.get(status_code, 1)))
exit(ERROR_EXIT_CODES_MAP.get(status_code, 1))
def check_soar_rest_connection(cmd_line_args, app_configs):
"""
Check if we can successfully get a resilient_client
therefore that will tell us if we have configured the app.config
file correctly in order to establish a REST connection and authenticate
with SOAR
:param cmd_line_args: an argparse.Namespace object containing all command line params
:type cmd_line_args: argparse.Namespace
:param app_configs: a dict of all the configurations in the app.config file
:type app_configs: dict
:excepts BasicHTTPException: if we cannot authenticate. Exits with 21
:excepts SSLError: if the cafile that is supplied is invalid. Exits with 23
:excepts Exception: generic error. Also raises if the user is not a member of the current org. Exits with 20
:return: Nothing
"""
LOG.info("{0}Testing REST connection to SOAR{0}".format(constants.LOG_DIVIDER))
host = app_configs.get("host", constants.DEFAULT_NONE_STR)
user = app_configs.get("api_key_id", app_configs.get("email", constants.DEFAULT_NONE_STR))
cafile = app_configs.get("cafile") if app_configs.get("cafile") else ""
if not os.path.isfile(cafile):
LOG.warning("- WARNING: No certificate file specified, connection will not be secure")
LOG.info("- Checking if we can authenticate a REST connection with '{0}' to '{1}'".format(user, host))
if is_env_proxies_set():
proxy_details = get_and_parse_proxy_env_var(res_constants.ENV_HTTPS_PROXY)
if not proxy_details:
proxy_details = get_and_parse_proxy_env_var(res_constants.ENV_HTTP_PROXY)
LOG.info("- Using a '{0}' Proxy with Host '{1}' and Port '{2}'".format(proxy_details.get("scheme"), proxy_details.get("hostname"), proxy_details.get("port")))
try:
res_client = helpers.get_resilient_client(ALLOW_UNRECOGNIZED=True)
except BasicHTTPException as e:
# Connection unauthorized
error_connecting_to_soar(host, e.response.reason, e.response.status_code)
except SimpleHTTPException as e:
# Incorrect User/Password
if hasattr(e, "args") and isinstance(e.args, tuple) and constants.ERROR_INVALID_USR in e.args[0]:
error_connecting_to_soar(host, e, 25)
error_connecting_to_soar(host, u"Unknown REST Error: {0}".format(e), 20)
except SSLError as e:
# SSL Error (Certificate Error)
error_connecting_to_soar(host, e, 23)
except ConnectionError as e:
# Generic connection error
error_connecting_to_soar(host, e, 20)
except OSError as e:
if hasattr(e, "args") and isinstance(e.args, tuple) and constants.ERROR_CA_FILE_NOT_FOUND in e.args[0]:
# CA file could not be found/read
error_connecting_to_soar(host, e, 22)
error_connecting_to_soar(host, u"Unknown REST Error: {0}".format(e), 20)
except Exception as e:
# Generic connection error (normally related to the user's org membership)
if hasattr(e, "args") and isinstance(e.args, tuple):
# User not member of organization error
if constants.ERROR_USR_NOT_MEMBER_ORG in e.args[0]:
error_connecting_to_soar(host, e, 24)
error_connecting_to_soar(host, e, 20)
error_connecting_to_soar(host, u"Unknown REST Error: {0}".format(e), 20)
if not isinstance(res_client, SimpleClient):
error_connecting_to_soar(host, "Unknown REST Error", 20)
LOG.info("{0}Successfully connected via REST!{0}".format(constants.LOG_DIVIDER))
def check_soar_stomp_connection(cmd_line_args, app_configs):
"""
Check if we can successfully start an instance of resilient_circuits
and therefore that will tell us if we have configured the app.config
file correctly in order to establish a STOMP connection and authenticate
with SOAR
:param cmd_line_args: an argparse.Namespace object containing all command line params
:type cmd_line_args: argparse.Namespace
:param app_configs: a dict of all the configurations in the app.config file
:type app_configs: dict
:excepts BasicHTTPException: if we cannot authenticate. Exits with 31
:excepts Exception: generic error. Exits with 30
:return: Nothing
"""
LOG.info("{0}Testing STOMP connection to SOAR{0}".format(constants.LOG_DIVIDER))
host = app_configs.get("host", constants.DEFAULT_NONE_STR)
user = app_configs.get("api_key_id", app_configs.get("email", constants.DEFAULT_NONE_STR))
LOG.info("- Checking if we can authenticate a STOMP connection with '{0}' to '{1}'".format(user, host))
try:
LOG.info("{0}Instantiating instance of resilient-circuits and starting it...{0}".format(constants.LOG_DIVIDER))
resilient_circuits_instance = app.App(ALLOW_UNRECOGNIZED=True, IS_SELFTEST=True)
# Create thread that targets the `run()` method of the main resilient_circuits component
t_running_resilient_circuits = Thread(target=resilient_circuits_instance.run, name="resilient_circuits")
t_running_resilient_circuits.start()
start_time = time.time()
while len(SELFTEST_SUBSCRIPTIONS) == 0:
LOG.parent.info("- Waiting for subscription to message destination. Sleeping for 2 seconds")
time.sleep(2)
if helpers.should_timeout(start_time, app_configs.get(constants.DEFAULT_SELFTEST_TIMEOUT_KEY, constants.DEFAULT_SELFTEST_TIMEOUT_VALUE)):
resilient_circuits_instance.action_component.fire(SelftestTerminateEvent())
error_connecting_to_soar(host, "Could not subscribe to any message destinations", 33)
# Send event to Terminate resilient-circuits
resilient_circuits_instance.action_component.fire(SelftestTerminateEvent())
# Remove resilient-circuits logging handler
time.sleep(2)
LOG.parent.handlers = []
if SELFTEST_ERRORS:
# Not authorized to read from queue
for e in SELFTEST_ERRORS:
if b"is not authorized to read from queue" in e:
error_connecting_to_soar(host, "'{0}' is not authorized to read from the App's Message Destination".format(helpers.get_user(app_configs)), 32)
error_connecting_to_soar(host, u"Unknown STOMP Error: {0}".format(e), 30)
except BasicHTTPException as e:
# Not authorized to instantiate STOMP connection
error_connecting_to_soar(host, e.response.reason, 31)
except Exception as e:
# Generic connection error
error_connecting_to_soar(host, u"Unknown STOMP Error: {0}".format(e), 30)
LOG.info("{0}Successfully connected via STOMP!{0}".format(constants.LOG_DIVIDER))
def run_apps_selftest(cmd_line_args, app_configs):
"""
Loop through every selftest for every eligible package, call it and store the returned state,
then print out each package and its selftest state.
"""
if hasattr(cmd_line_args, "print_env") and cmd_line_args.print_env:
LOG.info(helpers.get_env_str(pkg_resources.working_set))
components = defaultdict(list)
# custom entry_point only for selftest functions
selftest_entry_points = [ep for ep in pkg_resources.iter_entry_points('resilient.circuits.selftest')]
for ep in selftest_entry_points:
components[ep.dist].append(ep)
if len(selftest_entry_points) == 0:
LOG.info("No selftest entry points found.")
return None
# make a copy
install_list = list(cmd_line_args.install_list) if cmd_line_args.install_list else []
# Prepare a count of exceptions found with selftests.
selftest_failure_count = 0
for dist, component_list in components.items():
if cmd_line_args.install_list is None or dist.project_name in install_list:
# remove name from list
if dist.project_name in install_list:
install_list.remove(dist.project_name)
LOG.info("{0}Running selftest for: '{1}'{0}".format(constants.LOG_DIVIDER, dist.project_name))
# add an entry for the package
LOG.info("\n%s: ", dist.project_name)
for ep in component_list:
# load the entry point
f_selftest = ep.load()
try:
# f_selftest is the selftest function, we pass the selftest resilient options in case it wants to use it
start_time_milliseconds = int(round(time.time() * 1000))
status = f_selftest(app_configs)
end_time_milliseconds = int(round(time.time() * 1000))
delta_milliseconds = end_time_milliseconds - start_time_milliseconds
delta_seconds = delta_milliseconds / 1000
state = status.get("state")
if isinstance(state, str):
LOG.info("\t%s: %s\n\tselftest output:\n\t%s\n\tElapsed time: %f seconds", ep.name, state, status, delta_seconds)
if state.lower() == "failure":
selftest_failure_count += 1
else:
LOG.info("\t%s:\n\tUnsupported dictionary returned:\n\t%s\n\tElapsed time: %f seconds", ep.name, status, delta_seconds)
except Exception as e:
LOG.error("Error while calling %s. Exception: %s", ep.name, str(e))
selftest_failure_count += 1
continue
# any missed packages?
if len(install_list):
LOG.warning("%s not found. Check package name(s)", install_list)
# Check if any failures were found and printed to the console
if selftest_failure_count:
LOG.info("\nERROR: running selftest for App.\nError Code: {0}".format(ERROR_EXIT_CODES_MAP.get(1, 1)))
exit(ERROR_EXIT_CODES_MAP.get(1, 1))
LOG.info("{0}Successfully ran App's selftest!{0}".format(constants.LOG_DIVIDER))
def execute_command(cmd_line_args):
LOG.info("{0}Running selftest with IBM SOAR{0}".format(constants.LOG_DIVIDER))
if hasattr(cmd_line_args, "print_env") and cmd_line_args.print_env:
LOG.info("- Printing runtime environment")
LOG.info(helpers.get_env_str(pkg_resources.working_set))
LOG.info("- Getting app.configs")
app_configs = helpers.get_configs(ALLOW_UNRECOGNIZED=True)
check_soar_rest_connection(cmd_line_args, app_configs)
check_soar_stomp_connection(cmd_line_args, app_configs)
run_apps_selftest(cmd_line_args, app_configs)
LOG.info("{0}selftest complete{0}".format(constants.LOG_DIVIDER))
|
monster_tel.py
|
import telegram as tl
from telegram.ext import *
import logging
from monster import *
import time
def main():
def stop_and_restart():
import os, sys
updater.stop()
os.execl(sys.executable, sys.executable, *sys.argv)
def restart(update, context):
from threading import Thread
update.message.reply_text('Bot is restarting...')
Thread(target=stop_and_restart).start()
update.message.reply_text("Hey, I'm back")
updater = Updater(
"793965900:AAHxDmCQmK88F3Mjr9ODPOO7_YC3bL_UsTE",
use_context=True)
dp = updater.dispatcher
jq = updater.job_queue
# Personal Handlers
dp.add_handler(CommandHandler('reset', restart, filters=Filters.user(username = '@esft24')))
# Game Handlers
dp.add_handler(CommandHandler('start', nocallback))
dp.add_handler(CommandHandler('newgame', newgame))
dp.add_handler(CommandHandler('join', join))
dp.add_handler(CommandHandler('nogame', resetgame))
dp.add_handler(CommandHandler('startgame', startgame))
dp.add_handler(CommandHandler('position', nocallback))
dp.add_handler(CommandHandler('score', nocallback))
dp.add_handler(CommandHandler('info', nocallback))
# Query Handlers
dp.add_handler(CallbackQueryHandler(processChoice, pass_update_queue=True))
# Message Handlers
dp.add_handler(MessageHandler(Filters.command, nocallback))
updater.start_polling()
updater.idle()
# Callbacks
# Default callback for commands not added yet
def nocallback(update, context):
context.bot.send_message(chat_id=update.message.chat_id,
text="Command not found. Maybe not added yet?")
# Callback for game creation.
def newgame(update, context):
if isPrivate(update, context):
return
chat_id = update.message.chat_id
chat_data = context.chat_data
key = "game_" + str(chat_id)
if not key in chat_data:
game = Game()
game.chat_id = chat_id
chat_data[key] = game
context.bot.sendMessage(
chat_id=chat_id,
text="Game created. Every player must join the game with the /join command.\nThe game creator can join too and they can start the game with the command /startgame"
)
else:
context.bot.sendMessage(
chat_id=chat_id,
text="There is already a game created in this group. Send /resetGame to reset the game's settings and restart."
)
# Callback for adding a player to the game.
def join(update, context):
if isPrivate(update, context):
return
chat_id = update.message.chat_id
chat_data = context.chat_data
key = "game_" + str(chat_id)
if not key in chat_data:
context.bot.sendMessage(
chat_id=chat_id,
text="There's not a game started in this group yet! Use /newgame to start a game and then join."
)
return
game = chat_data[key]
user_id = update.message.from_user.id
user_first_name = update.message.from_user.first_name
addable = game.checkIfAddable(user_id)
if addable == 1:
context.bot.sendMessage(
chat_id=chat_id,
text="You are already in this game!"
)
return
elif addable == 2:
context.bot.sendMessage(
chat_id=chat_id,
text="There isn't space for another monster!"
)
return
try:
context.bot.sendMessage(
chat_id=user_id,
text="Hey {}. Welcome! You are in the game!".format(
user_first_name)
)
except:
context.bot.sendMessage(
chat_id=chat_id,
text="{}, I can't send you a private message. Please go to @mseeksm_bot and click 'Start'. Then try to /join again".format(
user_first_name)
)
return
player_monster = game.addPlayer(user_id, user_first_name)
context.bot.sendMessage(
chat_id=user_id,
text="You are the {} {}.\n{}\nGo find some spooky love!".format(
player_monster.name,
player_monster.emoji,
player_monster.description)
)
user_data = context.user_data
user_data['monster'] = player_monster
# Callback for game deleting.
def resetgame(update, context):
if isPrivate(update, context):
return
print(update.message.chat.__dict__)
chat_id = update.message.chat_id
chat_data = context.chat_data
key = "game_" + str(chat_id)
if key in chat_data:
game = chat_data[key]
game.deletePlayers()
del game
chat_data.clear()
context.bot.sendMessage(
chat_id=chat_id,
text="Game destroyed. Create a game with /newgame"
)
# Callback for game starting.
def startgame(update, context):
if isPrivate(update, context):
return
chat_id = update.message.chat_id
chat_data = context.chat_data
key = "game_" + str(chat_id)
if not key in chat_data:
context.bot.sendMessage(
chat_id=chat_id,
text="There's not a game started in this group yet! Use /newgame to start a game and then join."
)
return
game = chat_data[key]
# if len(game.players) < 3:
# context.bot.sendMessage(
# chat_id=chat_id,
# text="There's not enough players yet! You need 3 players to start the game. {} players so far.".format(
# str(len(game.players))
# )
# )
# return
context.bot.sendMessage(
chat_id=chat_id,
text="\U0001F319 The night is approaching! Check our private chat at @mseekm_bot to know more about your monster."
)
game.startGame()
time.sleep(5)
startround(update, context)
# Auxiliary functions
# Run the start of the round
def startround(update, context):
chat_id = update.message.chat_id
chat_data = context.chat_data
key = "game_" + str(chat_id)
game = chat_data[key]
game.startRound()
playersIds = game.playersIds
for p in playersIds:
startroundmessage(context, p, game.fullMoon)
context.job_queue.run_repeating(thinkTimer, 10, context={
"chat_id": chat_id, "game": game,
"bot": context.bot})
time.sleep(11)
makeChoice(update, context)
# Send a message of round start to the user
def startroundmessage(context, user_id, fullMoon):
if fullMoon:
context.bot.sendMessage(
chat_id=user_id,
text="\U0001F315 \U0001F315 \U0001F315 It's Full Moon! \U0001F315 \U0001F315 \U0001F315"
)
context.bot.sendMessage(
chat_id=user_id,
text="\U0001F315 You have 120 seconds to think about who to date, and who to betray, and who is who, and whatever else you want to think about. After that you will have to choose your date."
)
return
context.bot.sendMessage(
chat_id=user_id,
text="\U0001F319 You have 120 seconds to think about who to date, and who to betray, and who is who, and whatever else you want to think about. After that you will have to choose your date."
)
# Send timing messages to user
def thinkTimer(context):
job = context.job
job_context = job.context
chat_id = job_context["chat_id"]
game = job_context["game"]
bot = job_context["bot"]
if job.interval == 60:
for p in game.playersIds:
context.bot.sendMessage(
chat_id=p,
text="60 seconds..."
)
job.interval = 30
elif job.interval == 30:
for p in game.playersIds:
bot.sendMessage(
chat_id=p,
text="30 seconds..."
)
job.interval = 25
else:
for i in range(5, 0, -1):
for p in game.playersIds:
bot.sendMessage(
chat_id=p,
text=str(i)
)
time.sleep(1)
job.schedule_removal()
# Run the choosing round
def makeChoice(update, context):
chat_id = update.message.chat_id
chat_data = context.chat_data
key = "game_" + str(chat_id)
game = chat_data[key]
players = game.players
for p in players:
context.bot.send_message(chat_id=p.playerId, text="Make your choice! \U0000FE0F", reply_markup=p.buildChoiceMenu())
def processChoice(update, context):
print(context.__dict__)
# Check if the user is in a group or in a private chat
def isPrivate(update, context):
chat_id = update.message.chat_id
if update.message.chat.type == "private":
context.bot.sendMessage(
chat_id=chat_id,
text="You have to add me to a group and /newgame me there!"
)
return True
return False
if __name__ == "__main__":
logging.basicConfig(format='\n %(asctime)s \n %(name)s \n %(levelname)s \n %(message)s',
level=logging.INFO)
main()
|
MiddleServer.py
|
#!/usr/bin/env python3
import socket
import threading
import time
from message import Message
import TorzelaUtils as TU
class MiddleServer:
# Set the next server's IP and listening port
# also set listening port for this middle server
def __init__(self, nextServerIP, nextServerPort, localPort):
self.nextServerIP = nextServerIP
self.nextServerPort = nextServerPort
self.localPort = localPort
# We can have a maximum of one server connected to us
# Initialize these to 0 here, we will change them later
# when we get the first connection
self.previousServerIP = 0
self.previousServerPort = 0
# Used for onion routing in the conversational protocol
# The keys and messages will be updated each round
self.clientLocalKeys = []
self.clientMessages = []
self.nMessages = 0
# The server keys
self.__privateKey, self.publicKey = TU.generateKeys(
TU.createKeyGenerator() )
# We need to spawn off a thread here, else we will block
# the entire program
threading.Thread(target=self.setupConnection, args=()).start()
# Setup main listening socket to accept incoming connections
threading.Thread(target=self.listen, args=()).start()
def getPublicKey(self):
return self.publicKey
def setupConnection(self):
# Before we can connect to the next server, we need
# to send a setup message to the next server
setupMsg = Message()
setupMsg.setType(0)
setupMsg.setPayload("{}".format(self.localPort))
self.connectionMade = False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while not self.connectionMade:
try:
sock.connect((self.nextServerIP, self.nextServerPort))
sock.sendall(str.encode(str(setupMsg)))
self.connectionMade = True
except:
# Put a delay here so we don't burn CPU time
time.sleep(1)
sock.close()
print("MiddleServer successfully connected!")
# This is where all messages are handled
def listen(self):
# Wait until we have connected to the next server
while not self.connectionMade:
time.sleep(1)
# 1. Bind to localhost. We need to have the sock object
# available to other methods.
self.listenSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listenSock.bind(('localhost', self.localPort))
self.listenSock.listen(1)
while True:
print("MiddleServer awaiting connection")
conn, client_addr = self.listenSock.accept()
print("MiddleServer accepted connection from " + str(client_addr))
# Spawn a thread to handle the client
threading.Thread(target=self.handleMsg, args=(conn, client_addr,)).start()
# This runs in a thread and handles messages from clients
def handleMsg(self, conn, client_addr):
# Receive data from client
clientData = conn.recv(32768).decode("utf-8")
# Format as message
clientMsg = Message()
clientMsg.loadFromString(clientData)
if clientMsg.getNetInfo() != 1 and clientMsg.getNetInfo() != 2:
print("Middle Server got " + clientData)
# Check if the packet is for setting up a connection
if clientMsg.getNetInfo() == 0:
# If it is, add the previous server's IP and Port
self.previousServerIP = client_addr[0]
self.previousServerPort = int(clientMsg.getPayload())
conn.close()
elif clientMsg.getNetInfo() == 1:
print("Middle Server received message from Front server")
# In here, we handle packets being sent towards
# the dead drop. There is only one way to send packets
# TODO -> Add lock to this whole part
if self.nMessages <= len(self.clientMessages):
print("Middle server error: received more messages than expected")
# Decrypt one layer of the onion message
clientLocalKey, newPayload = TU.decryptOnionLayer(
self.__privateKey, clientMsg.getPayload(), serverType=0)
clientMsg.setPayload(newPayload)
# Save the message data
self.clientLocalKeys.append(clientLocalKey)
self.clientMessages.append(clientMsg)
if self.nMessages == len(self.clientMessages):
self.forwardMessages()
elif clientMsg.getNetInfo() == 2:
print("Middle Server received message from Spreading server")
# In here, we are handling messages sent back
# to the client. There is only one way to send packets
if self.nMessages <= len(self.clientMessages):
print("Middle server error: received more messages than expected")
# Encrypt one layer of the onion message
clientLocalKey = self.clientLocalKeys[ len(self.clientMessages) ]
newPayload = TU.encryptOnionLayer(self.__privateKey,
clientLocalKey,
clientMsg.getPayload())
clientMsg.setPayload(newPayload)
self.clientMessages.append(clientMsg)
if self.nMessages == len(self.clientMessages):
self.forwardResponses()
elif clientMsg.getNetInfo() == 3:
# Dialing Protocol: Client -> DeadDrop
_, newPayload = TU.decryptOnionLayer(
self.__privateKey, clientMsg.getPayload(), serverType=0)
clientMsg.setPayload(newPayload)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.nextServerIP, self.nextServerPort))
sock.sendall(str(clientMsg).encode("utf-8"))
sock.close()
elif clientMsg.getNetInfo() == 4:
# In here, we handle the first message sent by the previous server.
# It notifies us of a new round and how many messages are coming
# TODO -> Add lock to this whole part
self.nMessages = int(clientMsg.getPayload())
self.clientMessages = []
self.clientLocalKeys = []
# Assuming that the messages are stored in self.clientMessages, this method
# adds noise, shuffles the messages and forwards them to the next server
def forwardMessages(self):
# TODO (jose): Noise addition goes here
# Apply the mixnet by shuffling the messages
self.permutation = TU.generatePermutation(self.nMessages)
shuffledMessages = TU.shuffleWithPermutation(self.clientMessages,
self.permutation)
# Also shuffle the keys with the same permutation so they still match the shuffled messages:
# self.clientLocalKeys[ i ] is the key that unlocks message self.clientMessages[ i ]
# This is used afterwards in handleMessage, getNetInfo() == 2
self.clientLocalKeys = TU.shuffleWithPermutation(self.clientLocalKeys,
self.permutation)
# Forward all the messages to the next server
# Send a message to the next server notifying of the numbers of
# messages that will be sent
firstMsg = Message()
firstMsg.setNetInfo(4)
firstMsg.setPayload("{}".format(self.nMessages))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.nextServerIP, self.nextServerPort))
sock.sendall(str(firstMsg).encode("utf-8"))
sock.close()
# Send all the messages to the next server
for msg in shuffledMessages:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.nextServerIP, self.nextServerPort))
sock.sendall(str(msg).encode("utf-8"))
sock.close()
# Restart the messages so that we receive the responses from the
# next server
self.clientMessages = []
def forwardResponses(self):
# Unshuffle the messages
self.clientMessages = TU.unshuffleWithPermutation(self.clientMessages,
self.permutation)
# Send the responses back to the previous server
for msg in self.clientMessages:
tempSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tempSock.connect((self.previousServerIP, self.previousServerPort))
tempSock.sendall(str(msg).encode("utf-8"))
tempSock.close()
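# --- Illustrative sketch (added note, not part of the original server; assumes TorzelaUtils
# behaves as it is used above) ---
# forwardMessages/forwardResponses rely on two properties: shuffling messages and keys with the
# *same* permutation keeps clientLocalKeys[i] paired with clientMessages[i], and unshuffling the
# responses with that permutation restores the original client order. A minimal illustration:
def _example_shuffle_roundtrip():
    messages = ["msg0", "msg1", "msg2", "msg3"]
    keys = ["key0", "key1", "key2", "key3"]
    perm = TU.generatePermutation(len(messages))
    shuffled_messages = TU.shuffleWithPermutation(messages, perm)
    shuffled_keys = TU.shuffleWithPermutation(keys, perm)
    # keys and messages stay aligned because both were shuffled with the same permutation
    assert [k[-1] for k in shuffled_keys] == [m[-1] for m in shuffled_messages]
    # unshuffling restores the original order
    assert TU.unshuffleWithPermutation(shuffled_messages, perm) == messages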
|
tcp_server.py
|
#!/usr/bin/env python3
import socket
import threading
bind_ip = '0.0.0.0'
bind_port = 9000
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
# set maximum backlog connection to 5
server.listen(5)
print("[*] Listening on {0} : {1}".format(bind_ip, bind_ip))
# client handling thread
def handle_client(client_socket):
request = client_socket.recv(1024).decode()
print("[*] Received {0}".format(request))
# send back a packet
message = "ACK!"
client_socket.send(message.encode())
client_socket.close()
while True:
client, addr = server.accept()
print("[*] Accepted connection from {0} : {1}".format(addr[0], addr[1]))
# spin up our client thread to handle incoming data
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
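# --- Companion client sketch (illustrative addition, not part of the original script) ---
# A minimal client that exercises the server above: it connects to port 9000, sends one request,
# and prints the "ACK!" reply. Run it from another process while the server is listening.
def example_client(target_ip="127.0.0.1", target_port=9000):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((target_ip, target_port))
    client.send(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n")
    response = client.recv(4096)
    print(response.decode())
    client.close()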
|
utilities.py
|
#!/usr/bin/env python3
# This file is part of the MIDAS system.
# Copyright 2014
# Andreas Henelius <[email protected]>,
# Jari Torniainen <[email protected]>
# Finnish Institute of Occupational Health
#
# This code is released under the MIT License
# http://opensource.org/licenses/mit-license.php
#
# Please see the file LICENSE for details.
import sys
import zmq
import time
import select
import socket
import os.path
import threading
import configparser
from multiprocessing import Lock, Value
class Beacon(object):
""" A UDP broadcast beacon with some functions allowing easy use. """
def __init__(self,
name='',
node_type='',
node_id='',
ip=None,
port='',
protocol='tcp',
status='',
port_broadcast=5670,
interval=5):
""" Create the beacon and set some properties, but do not start it. """
self.name = name
self.type = node_type
self.id = node_id
self.ip = ip
self.port = port
self.protocol = protocol
self.status = status
self.is_running = False
self.data = ''
self.port_broadcast = port_broadcast
self.interval = interval
# -------------------------------------------------------------------------
def start(self):
""" Start broadcasting data on the beacon, i.e., make it visible. """
if self.ip is None:
self.ip = get_ip()
self.update_data()
self.is_running = True
t = threading.Thread(target=self.broadcast)
t.start()
def broadcast(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
while self.is_running:
try:
s.sendto(self.data, ('<broadcast>', self.port_broadcast))
except OSError:
s.sendto(self.data, ('localhost', self.port_broadcast))
time.sleep(self.interval)
def stop(self):
""" Stop the beacon. """
if self.is_running:
self.is_running = False
def update_data(self):
url_node = '{}://{}:{}'.format(self.protocol, self.ip, self.port)
data = ';'.join(['midas',
str(self.name),
str(self.type),
str(self.id),
url_node,
str(self.status)
])
self.data = str.encode(data)
def set_status(self, status):
""" Set the status of the node.
If the node is already broadcasting, change the message in the
broadcast.
"""
self.status = status
if self.is_running:
self.stop()
self.start()
class DataState(object):
""" Thread-safe boolean that can, e.g., be used to keep track of whether
there is new data or not
"""
def __init__(self, initial_state=0):
self.state = Value('i', initial_state)
self.lock = Lock()
def setstate(self, val):
with self.lock:
self.state.value = val
def flipstate(self):
with self.lock:
self.state.value ^= True
def getstate(self):
return(self.state.value)
def discover_all_nodes(timeout=10, port_broadcast=5670):
""" Discover all MIDAS nodes and return them as a dictionary."""
# Loop until the socket is free.
# This is needed in order to avoid conflicts when multiple dispatchers
# are used on the same host.
while True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('', port_broadcast))
s.setblocking(0)
break
except OSError:
pass
buffersize = 1024
t_start = time.time()
tmp_list = []
node_dict = {}
while(time.time() - t_start < timeout):
result = select.select([s], [], [], timeout)
if result[0]:
message = result[0][0].recv(buffersize)
message = message.decode('ascii')
if message.startswith('midas'):
message = validate_message(message)
if message not in tmp_list:
tmp_list.append(message)
node_dict[message['name']] = message
s.close()
return node_dict
def validate_message(message):
""" Validate a received message to make sure that it
is a valid message in the MIDAS framework and return
a dictionary containing the information sent by the beacon.
"""
message = message.split(';')
result = None
if message[0] == 'midas':
k = ['name', 'type', 'id', 'address', 'status']
result = dict(zip(k, message[1:]))
return result
def filter_nodes(node_dict, f={}):
""" Filter nodes based on criteria in the filter dictionary.
Args:
node_dict : dictionary with nodes from discover_all_nodes()
f : a dictionary with criteria to filter nodes from
node_dict.
Returns: a new dictionary with only nodes matching the filter.
"""
if len(f) > 0:
matching_nodes = {}
# build the template string
tk = sorted(f.keys())
template = make_string(f, tk)
# compare the template with all candidates
for n in node_dict:
if make_string(node_dict[n], tk) == template:
matching_nodes[n] = node_dict[n]
else:
matching_nodes = node_dict
return matching_nodes
def make_string(d, key_list):
""" Make a string from dictionary values using keys given as a list. """
return ';'.join([str(d[k]) for k in key_list])
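# --- Usage sketch (illustrative addition; the node name, type and port below are placeholders) ---
# A Beacon announces a node over UDP broadcast in the 'midas;name;type;id;url;status' format,
# and discover_all_nodes()/filter_nodes() in another process can pick it up and filter it.
def _example_beacon_discovery():
    beacon = Beacon(name="example_node", node_type="sensor", node_id="01",
                    port=5011, status="online")
    beacon.start()                           # broadcast every `interval` seconds
    nodes = discover_all_nodes(timeout=6)    # listen for beacons on the broadcast port
    sensors = filter_nodes(nodes, f={"type": "sensor"})
    beacon.stop()
    return sensors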
def midas_send(socket, message_type, message, address=None):
""" Temporary messasing functions for debuggings. """
if address:
socket.send(address, zmq.SNDMORE)
socket.send(b"", zmq.SNDMORE)
socket.send_string(message)
else:
socket.send_string(message_type, zmq.SNDMORE)
socket.send_string(message)
def midas_recv(socket):
address = socket.recv()
socket.recv() # Empty sequence
msg_type = socket.recv_string()
message = socket.recv_string()
return address, msg_type, message
def get_ip():
""" Return the current IP address."""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.connect(('<broadcast>', 0))
ip = s.getsockname()[0]
except OSError:
ip = '127.0.0.1'
return(ip)
def get_config_options(otype):
""" Return list of valid configuration options for nodes and dispatcher."""
if otype == 'node':
return ['node_name', 'node_type', 'node_id', 'node_description', 'primary_node', 'ip', 'port_frontend', 'port_backend', 'port_publisher', 'n_responders', 'lsl_stream_name', 'primary_n_channels', 'primary_channel_names', 'primary_channel_descriptions', 'primary_sampling_rate', 'primary_buffer_size_s', 'run_publisher', 'secondary_node', 'secondary_n_channels', 'secondary_buffer_size', 'secondary_channel_names', 'secondary_channel_descriptions', 'default_channel']
elif otype == 'dispatcher':
return ['node_list', 'port', 'ip', 'n_threads', 'run_pubsub_proxy', 'proxy_port_in', 'proxy_port_out']
else:
return None
def midas_parse_config(nodeclass, *args):
""" Parse configuration for a node, and if valid return a node"""
# Read configuration from an INI file given as a command-line argument
args = args[0]
if len(args) < 2:
print('Error! No INI file provided.')
return None
else:
if os.path.isfile(args[1]):
cfg = configparser.ConfigParser()
cfg.read(args[1])
else:
print('Error! INI file does not exist.')
return None
if len(args) == 2:
if len(cfg.sections()) > 1:
print('Error! Multiple sections in the INI file.'
'Provide section name.')
return None
else:
tmp = dict(cfg.items(cfg.sections()[0]))
if len(args) == 3:
if cfg.has_section(args[2]):
tmp = dict(cfg.items(args[2]))
else:
print('Error! Section not found in INI file.')
return None
# Determine the object type (node or dispatcher)
clist = [i.__name__ for i in nodeclass.__mro__]
otype = None
if 'BaseNode' in clist:
otype = 'node'
if 'Dispatcher' in clist:
otype = 'dispatcher'
if otype is None:
print('Error! Unrecognised MIDAS object type.')
return None
# Check supported options for the given object type
unrecognised_options = set(list(tmp.keys())) - set(get_config_options(otype))
if (len(unrecognised_options) > 0):
print('Error! Unrecognised configuration options provided. Please fix!')
print('The following options are not recognised:')
for i in unrecognised_options:
print('\t' + i)
return None
# Create the node
return nodeclass(tmp)
def parse_config_to_dict(cfg_file, section):
""" Reads config file and returns a dict of parameters.
Args:
cfg_file: <String> path to the configuration ini-file
section: <String> section of the configuration file to read
Returns:
cfg: <dict> configuration parameters of 'section' as a dict
"""
cfg = configparser.ConfigParser()
cfg.read(cfg_file)
if cfg.has_section(section):
return dict(cfg.items(section))
else:
print("Section '%s' not found in file %s!" % (section, cfg_file))
return None
def python_version():
""" Return the major Python version (2 or 3) """
return(float(sys.version[0]))
def make_url(ip, port, protocol='tcp'):
""" Return a URL """
return '{}://{}:{}'.format(protocol, ip, port)
def str2bool(x):
""" Convert a string to a boolean. """
return x.lower() in ("true", "1")
def listify(config, key, sep=','):
""" Create a list from a string containing list elements separated by
sep.
"""
return [i.strip() for i in config[key].split(sep)]
def find_range(array, win):
""" Find indices corresponding to win[0] and win[1] inside array.
Args:
array: <list> an array of values sorted in descending order
win: <tuple> window ranges
Returns:
i0: <int> index of the first window limit
i1: <int> index of the second window limit
"""
a = array[:]
i0 = None
i1 = None
for idx, val in enumerate(a):
if i0 is None and win[0] >= val:
i0 = idx
if i1 is None and win[1] >= val:
i1 = idx
return i0, i1
def LRU_queue_broker(url_frontend, url_backend, NBR_WORKERS, run_state):
""" Least-recently used queue broker.
Args:
url_backend: the router url used for backend (workers)
url_frontend: the router url used for frontend (clients)
NBR_workers: the number of workers (worker processes / threads)
run_state: <integer> boolean "poison pill" to signal termination to the
process
This function is modified from http://zguide.zeromq.org/py:lruqueue
originally written by Guillaume Aubert (gaubert)
<guillaume(dot)aubert(at)gmail(dot)com>.
Original code licensed under the MIT/X11.
http://zguide.zeromq.org/page:all#Getting-the-Examples
"""
# Logic of LRU loop
#
# - Poll backend always, frontend only if 1+ worker ready
# - If worker replies, queue worker as ready and forward reply
# to client if necessary
# - If client requests, pop next worker and send request to it
# Prepare our context and sockets
context = zmq.Context()
frontend = context.socket(zmq.ROUTER)
frontend.bind(url_frontend)
backend = context.socket(zmq.ROUTER)
backend.bind(url_backend)
# Queue of available workers
available_workers = 0
workers_list = []
# init poller
poller = zmq.Poller()
# Always poll for worker activity on backend
poller.register(backend, zmq.POLLIN)
# Poll front-end only if we have available workers
poller.register(frontend, zmq.POLLIN)
while run_state.value:
socks = dict(poller.poll(5000))
# Handle worker activity on backend
if (backend in socks and socks[backend] == zmq.POLLIN):
# Queue worker address for LRU routing
worker_addr = backend.recv(zmq.NOBLOCK)
assert available_workers < NBR_WORKERS
# add worker back to the list of workers
available_workers += 1
workers_list.append(worker_addr)
# Second frame is empty
empty = backend.recv(zmq.NOBLOCK)
assert empty == b""
# Third frame is READY or else a client reply address
client_addr = backend.recv(zmq.NOBLOCK)
# If client reply, send rest back to frontend
if client_addr != b"READY":
# Following frame is empty
empty = backend.recv(zmq.NOBLOCK)
assert empty == b""
# reply = backend.recv()
more = True
reply = []
while more:
reply.append(backend.recv_string(zmq.NOBLOCK))
more = backend.getsockopt(zmq.RCVMORE)
frontend.send(client_addr, zmq.SNDMORE)
frontend.send(b"", zmq.SNDMORE)
for r in reply[:-1]:
frontend.send_string(r, zmq.SNDMORE)
frontend.send_string(str(reply[-1]))
# poll on frontend only if workers are available
if available_workers > 0:
if (frontend in socks and socks[frontend] == zmq.POLLIN):
# Now get next client request, route to LRU worker
# Client request is [address][empty][request]
client_addr = frontend.recv(zmq.NOBLOCK)
empty = frontend.recv(zmq.NOBLOCK)
assert empty == b""
more = True
request = []
while more:
request.append(frontend.recv_string(zmq.NOBLOCK))
more = frontend.getsockopt(zmq.RCVMORE)
# Dequeue and drop the next worker address
available_workers -= 1
worker_id = workers_list.pop()
backend.send(worker_id, zmq.SNDMORE, zmq.NOBLOCK)
backend.send(b"", zmq.SNDMORE, zmq.NOBLOCK)
backend.send(client_addr, zmq.SNDMORE, zmq.NOBLOCK)
backend.send(b"", zmq.SNDMORE, zmq.NOBLOCK)
for r in request[:-1]:
backend.send_string(r, zmq.SNDMORE, zmq.NOBLOCK)
backend.send_string(str(request[-1]), zmq.NOBLOCK)
# Clean up when exiting
frontend.close()
backend.close()
context.term()
|
main.py
|
import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
import csv
from sklearn import neighbors
import gpu_pwr
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()
with open('job_queue.json', 'r') as fp:
queue = json.load(fp)
queue_dict = {}
arrival_time = 0
for item in queue:
arrival_time += np.random.poisson(30)
queue_dict[item] = arrival_time
queue_timer = time.time()
queue_delay = {}
for item in queue:
queue_delay[str(item)] = 0
job_start = {} #{'49': time1, '15': time2...}
JCT = {}
for item in queue:
JCT[str(item)] = 0
completion = {}
for item in queue:
completion[str(item)] = 0
overhead = {} # initialize so that every job starts with 0s overhead time
for item in queue:
overhead[str(item)] = 0
ovhd_start = {} # initialize this to 0 as well
for item in queue:
ovhd_start[str(item)] = 0
b_start = {} # initialize this to 0 as well
for item in queue:
b_start[str(item)] = 0
c_start = {} # initialize this to 0 as well
for item in queue:
c_start[str(item)] = 0
d_start = {} # initialize this to 0 as well
for item in queue:
d_start[str(item)] = 0
ovhd_a = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_a[str(item)] = []
ovhd_b = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_b[str(item)] = []
ovhd_c = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_c[str(item)] = []
ovhd_d = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_d[str(item)] = []
ovhd_total = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_total[str(item)] = []
k80_1st = {}
for item in queue:
k80_1st[str(item)] = []
v100_1st = {}
for item in queue:
v100_1st[str(item)] = []
num_mig = {} # initialize migration time to 0
for item in queue:
num_mig[str(item)] = 0
queue_start = {} # initialize this to 0 as well
for item in queue:
queue_start[str(item)] = 0
queue_time = {} # initialize this to 0 as well
for item in queue:
queue_time[str(item)] = 0
V100_epoch_time = {}
for item in queue:
V100_epoch_time[str(item)] = 0
K80_epoch_time = {}
for item in queue:
K80_epoch_time[str(item)] = 0
K80_start_time = {}
for item in queue:
K80_start_time[str(item)] = 0
V100_start_time = {}
for item in queue:
V100_start_time[str(item)] = 0
promote_start_time = {}
for item in queue:
promote_start_time[str(item)] = 0
demote_list = []
K80_time = {}
for item in queue:
K80_time[str(item)] = 0
V100_time = {}
for item in queue:
V100_time[str(item)] = 0
gpu_usage_time = [] # don't initialize this
gpu_usage = []
gpu_usage_completion = []
speedup_dict = {}
for item in queue:
speedup_dict[str(item)] = 0
predict_dict = {}
for item in queue:
predict_dict[str(item)] = 0
index = 0
all_jobs_started = False
K80_cap = 16
V100_cap = 8
K80_used = 0
V100_used = 0
K80_job = {}
for i in range(K80_cap):
K80_job[str(i)] = 'idle'
V100_job = {}
for i in range(V100_cap):
V100_job[str(i)] = 'idle'
qualified_job = []
step1_job = []
step2_job = []
pc_job = []
K80_node = ['c2180', 'c2181']
V100_node = ['d1021', 'd1012']
host_node = 'c0255'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30 # make decision every 30s
def K80_LUT(gpu):
quotient = int(gpu) // 8
remainder = int(gpu) % 8
real_node = K80_node[quotient]
real_gpu = str(remainder)
return real_node, real_gpu
def V100_LUT(gpu):
quotient = int(gpu) // 4
remainder = int(gpu) % 4
real_node = V100_node[quotient]
real_gpu = str(remainder)
return real_node, real_gpu
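# Example (illustrative): K80_LUT('9') returns ('c2181', '1') because global GPU 9 is the second
# GPU on the second K80 node; V100_LUT('5') returns ('d1012', '1') by the same 4-GPU-per-node rule.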
######################### do a regression fit ########################
with open('x1_data.json') as f:
x1_data = json.load(f)
with open('x2_data.json') as f:
x2_data = json.load(f)
with open('x3_data.json') as f:
x3_data = json.load(f)
x1_norm = [(i - min(x1_data)) / (max(x1_data) - min(x1_data)) for i in x1_data]
x2_norm = [(i - min(x2_data)) / (max(x2_data) - min(x2_data)) for i in x2_data]
x3_norm = [(i - min(x3_data)) / (max(x3_data) - min(x3_data)) for i in x3_data]
# create training data
x_train = []
for i in range(len(x1_norm)):
x_train.append([x1_norm[i], x2_norm[i], x3_norm[i]])
with open('y_data.json') as f:
y_train = json.load(f)
model = neighbors.KNeighborsRegressor(n_neighbors = 3, weights='distance')
model.fit(x_train, y_train)
####################################################################
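# --- Hedged illustration (added sketch): querying the fitted KNN model for a new job ---
# A new job's three features are normalised with the same min/max as the training data above
# before calling predict. The feature values passed in are placeholders for illustration.
def _example_speedup_prediction(x1, x2, x3):
    f1 = (x1 - min(x1_data)) / (max(x1_data) - min(x1_data))
    f2 = (x2 - min(x2_data)) / (max(x2_data) - min(x2_data))
    f3 = (x3 - min(x3_data)) / (max(x3_data) - min(x3_data))
    return model.predict([[f1, f2, f3]])[0]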
def send_signal(node, cmd):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 10000
# Connect the socket to the port where the server is listening
server_address = (node, int(port))
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
try:
# Send data
message = cmd.encode('utf-8') #b'save 35' #b'start 35 gpu 6'#b'save 35'
print('sending {!r}'.format(message))
sock.sendall(message)
while True:
data = sock.recv(32)
if 'success' in data.decode('utf-8'):
# print('received {!r}'.format(data))
break
else:
print('waiting for success signal')
time.sleep(1)
finally:
#print('closing socket')
sock.close()
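# --- Hedged sketch of the receiving side (assumption: each worker node runs a small TCP server
# on port 10000 that parses the command string and replies with a message containing 'success';
# this listener is not part of this script and is shown for illustration only) ---
def _example_signal_listener(port=10000):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind(('', port))
    srv.listen(1)
    conn, _ = srv.accept()
    cmd = conn.recv(1024).decode('utf-8')   # e.g. 'start 35 gpu 6' or 'save 35 pid 10000'
    # ... dispatch the command to the local job runner here ...
    conn.sendall('success'.encode('utf-8'))
    conn.close()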
def max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, demote_list, force_demote):
num_demote = len(force_demote)
num_promote = len(promote_list)
V100_vacant = num_demote + V100_free
K80_vacant = num_promote + K80_free
global speedup_dict
if K80_vacant >= num_demote: # if more vacant K80s than demote jobs, always force demote
# selectively promote among active V100 jobs and promote list jobs
V100_qual = demote_list
#if 'idle' in V100_qual:
# V100_qual.remove('idle')
V100_pool = list(set(V100_qual).union(promote_list))
if num_promote <= V100_vacant: # promote all jobs as well
return promote_list, force_demote
else: # promote the top 4 jobs
pool_dict = {}
V100_avail = V100_vacant + len(V100_qual)
for job in V100_pool:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:V100_avail]
promotion_list = list(set(promote_list).intersection(sorted_pool))
demotion_list = list(set(demote_list).difference(sorted_pool))
if 'idle' in demotion_list:
demotion_list.remove('idle') # this includes force demotion
# lazy migration, for every V100 job from high speedup to low speedup and not in sorted_pool, compare it with
# K80 jobs in sorted_pool, from low speedup to high speedup. If difference within 0.2, replace the K80 job
# in sorted pool
for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True):
if job_demote in demotion_list:
for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False):
if job_promote in promotion_list:
if speedup_dict[job_promote] - speedup_dict[job_demote] < 0.2:
demotion_list.remove(job_demote)
promotion_list.remove(job_promote)
break
return promotion_list, demotion_list
# situations below won't happen
elif V100_vacant >= num_promote: # if more vacant V100s than promote jobs, always promote
# less vacant K80s than demote jobs, select worst among force demote list
pool_dict = {} # here the pool only includes force demote jobs
for job in force_demote:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:K80_vacant]
if len(sorted_pool) > 0:
raise ValueError('Bug, demotion shouldnt happen because no practical complete')
return promote_list, sorted_pool
else:
raise ValueError('Bug with max speedup promotion, condition not considered')
def min_speedup_demotion(K80_job, demote_list):
num_demote = len(demote_list)
global speedup_dict
# selectively demote among active K80 jobs and demote list jobs
K80_qual = list(set(list(K80_job.values())))
if 'idle' in K80_qual:
K80_qual.remove('idle')
K80_pool = list(set(K80_qual).union(demote_list))
if len(K80_pool) <= K80_cap: # demote all jobs, no promotion
return [], demote_list[:] # must return a copy, otherwise the output points to the same address as input
else: # promote the top 4 jobs
pool_dict = {}
for job in K80_pool:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
        sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:K80_cap] # the K80_cap least-speedup jobs
demotion_list = list(set(demote_list).intersection(sorted_pool))
promotion_list = list(set(list(K80_job.values())).difference(sorted_pool))
if 'idle' in promotion_list:
            promotion_list.remove('idle')
        # lazy migration: walk the jobs slated for demotion from high to low speedup and compare each with the
        # promotion candidates from low to high speedup. If the speedup difference is within 0.2, cancel the
        # swap and leave both jobs where they are.
for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True):
if job_demote in demotion_list:
for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False):
if job_promote in promotion_list:
if speedup_dict[job_promote] - speedup_dict[job_demote] < 0.2:
demotion_list.remove(job_demote)
promotion_list.remove(job_promote)
break
return promotion_list, demotion_list
def save_job(node, job): # save_job('c2176', '50')
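    # Checkpoint a running job: wait until the job has reported ckpt_qual (it is safe to checkpoint), then send
    # 'save <job> pid <pid>' to its node and start the migration-overhead timer. The worker later reports
    # 'checkpoint' through the TCP listener once the checkpoint has been written.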
# first wait for the job to be qualified for checkpointing
while True: # wait for ckpt_qual to be available
global ckpt_qual_dict
if ckpt_qual_dict['job'+job] == 1:
ckpt_qual_dict['job'+job] = 0
break
time.sleep(5)
global pid_dict
pid = pid_dict['job'+job]
send_signal(node, 'save ' + job + ' pid ' + pid) # 'save 50 pid 10000'
global ovhd_start
ovhd_start[job] = time.time()
    time.sleep(3) # in case epoch_waste is communicated too frequently
def kill_job(node, job): # kill_job('c2176', '50')
send_signal(node, 'kill ' + job)
# resume job
def resume_job(node, gpu, job): # resume_job('c2176', '3', '50')
cmd = 'resume ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# start job
def start_job(node, gpu, job):
cmd = 'start ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# function that checks the tensorboard logs of currently running V100 jobs and records jobs that have finished their
# first epoch in the global step1_job list. Once a job is in that list it is qualified for demotion, so that its K80
# epoch time can be profiled in step 2.
def check_step1_complete(job_list):
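    # Reads the first TensorBoard run directory of each job and, once at least three 'loss' scalars exist,
    # records the V100 epoch time as the gap between consecutive wall-clock timestamps.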
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
global step1_job
global V100_epoch_time
for job in job_list:
if job not in step1_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
if len(dirs) > 0:
tc = dirs[0]
iterator = EventAccumulator(tc).Reload()
tag = 'loss'
try:
if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time
wall_time = [t.wall_time for t in iterator.Scalars(tag)]
V100_epoch_time[job] = wall_time[1] - wall_time[0]
step1_job.append(job)
print('job' + job + ' has reached step1 complete')
except Exception:
pass
def check_step2_complete(job_list):
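    # Once a qualified job has produced a second TensorBoard run (its first run on a K80), measure its K80 epoch
    # time and compute the measured speedup, which overwrites the earlier model prediction in speedup_dict.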
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
global step1_job
global step2_job
global V100_epoch_time
global K80_epoch_time
global speedup_dict
for job in job_list:
if job in step1_job and job not in step2_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
if len(dirs) > 1:
tc = dirs[1]
iterator = EventAccumulator(tc).Reload()
tag = 'loss'
try:
if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time
wall_time = [t.wall_time for t in iterator.Scalars(tag)]
K80_epoch_time[job] = wall_time[1] - wall_time[0]
V100_time_step2 = V100_epoch_time[job]
K80_time_step2 = wall_time[1] - wall_time[0]
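                        # speedup is the fraction of K80 epoch time saved on a V100, e.g. a 300 s K80 epoch
                        # vs. a 100 s V100 epoch gives (300 - 100) / 300 ~= 0.67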
speedup = (K80_time_step2 - V100_time_step2) / K80_time_step2
speedup_dict[job] = speedup
step2_job.append(job)
print('job' + job + ' has reached step2 complete')
except Exception:
pass
# measure job
def measure_job(node, gpu, job):
cmd = 'measure ' + job + ' gpu ' + gpu
send_signal(node, cmd)
############### first clear finish status of all jobs ####################
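# per-job bookkeeping, keyed by 'jobN': worker pid, checkpoint-written flag, checkpoint-qualification flag,
# finish flag, and accumulated epoch time wasted to interruptions (as reported by the workers)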
pid_dict = {}
for i in range(len(queue)):
job_name = 'job' + str(i + 1)
pid_dict[job_name] = 0
checkpoint_dict = {}
for i in range(len(queue)):
job_name = 'job' + str(i + 1)
checkpoint_dict[job_name] = 0
ckpt_qual_dict = {}
for i in range(len(queue)):
job_name = 'job' + str(i + 1)
ckpt_qual_dict[job_name] = 0
finish_dict = {}
for i in range(len(queue)):
job_name = 'job' + str(i + 1)
finish_dict[job_name] = 0
epoch_waste_dict = {}
for i in range(len(queue)):
job_name = 'job' + str(i + 1)
epoch_waste_dict[job_name] = 0
#################### background thread running TCP socket ########################
def thread_function():
# here listen on the socket
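    # Each worker report is a space-separated string that starts with 'jobN' followed by a keyword
    # ('ckpt_qual', 'finish', 'pid', 'checkpoint', 'waste', 'b_end', 'c_end', 'd_end', '1st_epoch',
    # 'completion') and an optional value. The handler updates the matching global bookkeeping and
    # replies 'success' to acknowledge.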
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host_node, 10002)
print('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)
sock.listen(5)
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
while True:
data = connection.recv(32)
if data:
data_str = data.decode('utf-8')
global K80_start_time
global V100_start_time, promote_start_time
global K80_job
global V100_job
global K80_time
global V100_time
global ovhd_a, ovhd_b, ovhd_c, ovhd_d, k80_1st, v100_1st, ovhd_start, overhead, ovhd_total
global b_start, c_start, d_start, completion
if 'ckpt_qual' in data_str:
global ckpt_qual_dict
job_name = data_str.split(' ')[0]
ckpt_qual_dict[job_name] = 1
elif 'finish' in data_str:
global finish_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
finish_dict[job_name] = 1
JCT[job] = int(time.time() - job_start[job])
if job in list(K80_job.values()):
K80_time[job] += int(time.time() - K80_start_time[job])
elif job in list(V100_job.values()):
V100_time[job] += int(time.time() - V100_start_time[job])
elif 'pid' in data_str:
global pid_dict
job_name = data_str.split(' ')[0]
pid = data_str.split(' ')[2]
pid_dict[job_name] = pid
elif 'checkpoint' in data_str: # can only be received after save signal is sent
global checkpoint_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
checkpoint_dict[job_name] = 1
ovhd_a[job].append(int(time.time() - ovhd_start[job]))
b_start[job] = time.time()
elif 'waste' in data_str:
global epoch_waste_dict
job_name = data_str.split(' ')[0]
epoch_waste_time = data_str.split(' ')[2]
epoch_waste_dict[job_name] += int(epoch_waste_time)
elif 'b_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_b[job].append(int(time.time() - b_start[job]))
c_start[job] = time.time()
elif 'c_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_c[job].append(int(time.time() - c_start[job]))
d_start[job] = time.time()
elif 'd_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_d[job].append(int(time.time() - d_start[job]))
ovhd_total[job].append(int(time.time() - ovhd_start[job]))
if ovhd_start[job] != 0:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
if job in list(K80_job.values()):
K80_start_time[job] = time.time()
elif job in list(V100_job.values()):
V100_start_time[job] = time.time()
promote_start_time[job] = time.time()
elif '1st_epoch' in data_str: # 'job50 1st_epoch 35'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
epoch_time = int(data_str.split(' ')[2])
if job in list(K80_job.values()):
k80_1st[job].append(epoch_time)
elif job in list(V100_job.values()):
v100_1st[job].append(epoch_time)
elif 'completion' in data_str: # 'job50 completion 0.33'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
completion_portion = float(data_str.split(' ')[2])
completion[job] = completion_portion
#if 'ckpt_qual' in data_str or 'finish' in data_str or 'checkpoint' in data_str:
# print('received ' + data_str)
connection.sendall(b'success')
#time.sleep(5)
else:
break
finally:
connection.close()
x = threading.Thread(target=thread_function, daemon=True)
x.start()
###############################################################################
######################################################################
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 and V100 ##############################
for gpu, job in K80_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job[gpu] = 'idle'
print('K80 finished job: ' + job)
for gpu, job in V100_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
V100_used -= 1
V100_job[gpu] = 'idle'
print('V100 finished job: ' + job)
if job in demote_list:
demote_list.remove(job)
################ check step1 finished job of K80 jobs and step 2 of V100 #################
check_step1_complete(list(V100_job.values()))
check_step2_complete(list(K80_job.values()))
for gpu, job in V100_job.items():
if job not in qualified_job and job != 'idle':
if job in step1_job:
real_node, real_gpu = V100_LUT(gpu)
kill_job(real_node, job)
qualified_job.append(job)
print('job' + job + ' has been qualified for demotion')
time.sleep(3) # wait for run.sh to finish
x1, x3 = gpu_pwr.process_csv('job'+job, testcase)
x2 = 3600 / V100_epoch_time[job]
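                # speedup prediction features: x1 and x3 come from the power-profile CSV (presumably
                # power/utilization statistics), x2 is V100 epochs per hour; each is min-max normalized
                # with the training-set ranges before the pre-trained model predicts a speedup percentage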
# preprocess the data
x1 = (x1 - min(x1_data)) / (max(x1_data) - min(x1_data))
x2 = (x2 - min(x2_data)) / (max(x2_data) - min(x2_data))
x3 = (x3 - min(x3_data)) / (max(x3_data) - min(x3_data))
speedup_pred = model.predict(np.array([x1, x2, x3]).reshape((1,-1)))[0] / 100
speedup_dict[job] = speedup_pred
predict_dict[job] = speedup_pred
############### record number of newly arrived jobs ################
new_arrival = 0
index_cpy = index
while True:
time_passed = int(time.time() - queue_timer)
if index_cpy >= len(queue):
break
elif time_passed >= queue_dict[queue[index_cpy]]:
new_arrival += 1
index_cpy += 1
elif time_passed < queue_dict[queue[index_cpy]]:
break
################ make promotion decisions ########################
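    # Policy: if no new jobs arrived this interval, pack the highest-speedup jobs onto V100s
    # (max_speedup_promotion); otherwise skip promotions and demote the lowest-speedup V100 jobs
    # (min_speedup_demotion) so the new arrivals can start on V100s.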
V100_free = V100_cap - V100_used
K80_free = K80_cap - K80_used
if new_arrival == 0:
# this returns available jobs for promotion. Has to be qualified, and currently in K80, but not practically complete
promote_list = list(set(qualified_job).intersection(list(K80_job.values())).difference(pc_job))
else:
promote_list = []
# this returns job forced to be demoted. Currently in V100, and is practically complete
force_demote = list(set(list(V100_job.values())).intersection(pc_job))
# look at demote list
for gpu, job in V100_job.items():
if job != 'idle':
if job not in demote_list and job in step2_job and len(ovhd_total[job]) > 0:
job_speedup = speedup_dict[job] # 0.7
job_ovhd = np.mean(ovhd_total[job]) # 100
k80_1st_ovhd = np.mean(k80_1st[job]) - K80_epoch_time[job]
v100_1st_ovhd = np.mean(v100_1st[job]) - V100_epoch_time[job]
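                    # a job only becomes a demotion candidate once it has stayed on the V100 long enough for
                    # the expected speedup to amortize two migration overheads plus the first-epoch penalties
                    # on both GPU types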
demote_qualify_time = (2 * job_ovhd + k80_1st_ovhd + v100_1st_ovhd) / job_speedup
if int(time.time() - promote_start_time[job]) > max(demote_qualify_time, max(v100_1st[job])):
demote_list.append(job)
                    print('job' + job + ' qualified for demote for passing demote qualify time ' +
                        str(int(demote_qualify_time)))
elif job not in demote_list and job not in step2_job and job in qualified_job:
demote_list.append(job)
                    print('job' + job + ' qualified for demote for profiling')
if len(promote_list) > 0 or len(demote_list) > 0:
if new_arrival == 0:
promoted, demoted = max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, demote_list, force_demote)
else:
promoted, demoted = min_speedup_demotion(K80_job, demote_list)
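        # cap demotions: only free as many V100s as are needed by the promoted jobs and the new arrivals beyond
        # the currently free V100s; cancel the rest, keeping the lowest-speedup candidates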
if len(demoted) - len(promoted) > new_arrival - V100_free:
# demote only # of new arrivals + # of promoted
            print('some demotions canceled because there are more demotions than new arrivals + promotions, arrivals = ' +
str(new_arrival))
print('original demotion: ' + str(demoted))
demoted_pool = {}
for job in demoted:
if job in speedup_dict:
demoted_pool[job] = speedup_dict[job]
if len(promoted) + new_arrival - V100_free > 0:
demoted = sorted(demoted_pool, key=demoted_pool.get, reverse=False)[:(len(promoted)+new_arrival-V100_free)]
else:
demoted = []
print('new demotion: ' + str(demoted))
if len(promoted) > 0:
if new_arrival == 0:
print('no new job arrivals')
print('promoted jobs: ', promoted)
if len(demoted) > 0:
print('demoted jobs: ', demoted)
# stop all promoted jobs on K80
checkpoint_finish_check = []
for gpu, job in K80_job.items():
if job in promoted:
real_node, real_gpu = K80_LUT(gpu)
save_job(real_node, job)
if finish_dict['job'+job] != 1:
K80_time[job] += int(time.time() - K80_start_time[job])
checkpoint_finish_check.append(job)
K80_job[gpu] = 'idle'
K80_used -= 1
# stop all demoted jobs on V100
for gpu, job in V100_job.items():
if job in demoted:
# make sure demoted step1 job doesn't get promoted back before finishing profiling
if job in step1_job and job not in step2_job:
speedup_dict[job] = 0.01
real_node, real_gpu = V100_LUT(gpu)
save_job(real_node, job)
if finish_dict['job'+job] != 1:
V100_time[job] += int(time.time() - V100_start_time[job])
checkpoint_finish_check.append(job)
V100_job[gpu] = 'idle'
V100_used -= 1
demote_list.remove(job)
# wait for all GPUs to be available
if len(checkpoint_finish_check) > 0:
while True:
time.sleep(5)
for job in checkpoint_finish_check[:]:
if checkpoint_dict['job'+job] == 1: # checkpoint has finished, gpu is free
print(job + ' checkpointed successfully')
checkpoint_dict['job'+job] = 0 # reset it
checkpoint_finish_check.remove(job)
# also check if job already finished before sending checkpoint signal
elif finish_dict['job'+job] == 1:
print(job + ' finished before receiving checkpoint signal')
checkpoint_finish_check.remove(job)
if len(checkpoint_finish_check) == 0:
break
# give it some time to cleanup old checkpointed jobs
time.sleep(3)
# resume promoted jobs on V100, make sure the gpu is idle
for job_new in promoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in V100_job.items():
if job == 'idle': # if gpu idle, schedule new job here
V100_job[gpu] = job_new
real_node, real_gpu = V100_LUT(gpu)
resume_job(real_node, real_gpu, job_new)
num_mig[job_new] += 1
promoted.remove(job_new)
V100_used += 1
break
else: # job has already finished before checkpointing
promoted.remove(job_new)
# resume demoted jobs on K80, make sure the gpu is idle
for job_new in demoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in K80_job.items():
if job == 'idle': # if gpu idle, schedule new job here
real_node, real_gpu = K80_LUT(gpu)
resume_job(real_node, real_gpu, job_new)
num_mig[job_new] += 1
K80_job[gpu] = job_new
demoted.remove(job_new)
K80_used += 1
break
else: # job has already finished before checkpointing
print('job'+job_new+' has finished before checkpointing')
demoted.remove(job_new)
# perform a check, make sure all promoted/demoted jobs are scheduled
if len(promoted) > 0 or len(demoted) > 0:
raise ValueError('Bug with promotion scheme, more jobs than free gpus')
################ submit new jobs to vacant K80 GPUs ############################
# check if there are vacant K80s
## yes: submit jobs from queue
## no: do nothing
if not all_jobs_started:
if V100_used < V100_cap:
V100_free = V100_cap - V100_used
for i in range(V100_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
for gpu, job in V100_job.items():
if job == 'idle': # schedule new job here if idle
real_node, real_gpu = V100_LUT(gpu)
start_job(real_node, real_gpu, job_new)
measure_job(real_node, real_gpu, job_new)
V100_job[gpu] = job_new
job_start[job_new] = time.time()
queue_delay[job_new] = int(time_passed - queue_dict[queue[index]])
V100_start_time[job_new] = time.time()
index += 1
V100_used += 1
time.sleep(5) # don't communicate too often
break
elif index >= len(queue):
all_jobs_started = True
############## monitor GPU usage ############
usage = K80_used + V100_used
time_stamp = int(time.time() - queue_timer)
gpu_usage_time.append(time_stamp)
gpu_usage.append(usage)
total_completion = np.sum(list(completion.values()))
gpu_usage_completion.append(total_completion)
############### wait for next iteration
time.sleep(INTERVAL)
################ check if termination condition is met ################
K80_idle_num = sum(value == 'idle' for value in K80_job.values())
V100_idle_num = sum(value == 'idle' for value in V100_job.values())
if K80_idle_num == K80_cap and V100_idle_num == V100_cap and index == len(queue):
print('all jobs are finished!')
break
# get average JCT
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_overhead = np.average(list(overhead.values()))
overhead['average'] = average_overhead
average_queue_delay = np.average(list(queue_delay.values()))
queue_delay['average'] = average_queue_delay
# after everything is finished
print('finished all runs')
JCT_name = testcase + '_JCT.json'
overhead_name = testcase + '_overhead.json'
num_mig_name = testcase + '_num_mig.json'
epoch_waste_name = testcase + '_epoch_waste.json'
ckpt_qual_name = 'ckpt_qual.json'
finish_name = 'finish.json'
K80_time_name = testcase + '_K80_time.json'
V100_time_name = testcase + '_V100_time.json'
gpu_usage_name = testcase + '_gpu_usage.csv'
ovhd_a_name = testcase + '_ovhd_a.json'
ovhd_b_name = testcase + '_ovhd_b.json'
ovhd_c_name = testcase + '_ovhd_c.json'
ovhd_d_name = testcase + '_ovhd_d.json'
ovhd_total_name = testcase + '_ovhd_total.json'
k80_1st_name = testcase + '_k80_1st.json'
v100_1st_name = testcase + '_v100_1st.json'
speedup_name = 'speedup.json'
predict_name = 'predict.json'
demote_list_name = 'demote_list.json'
completion_name = 'completion.json'
queue_delay_name = testcase + '_queue_delay.json'
with open(JCT_name, 'w') as fp1:
json.dump(JCT, fp1, sort_keys=True, indent=4)
with open(overhead_name, 'w') as fp3:
json.dump(overhead, fp3, sort_keys=True, indent=4)
with open(num_mig_name, 'w') as fp3:
json.dump(num_mig, fp3, sort_keys=True, indent=4)
with open(epoch_waste_name, 'w') as fp3:
json.dump(epoch_waste_dict, fp3, sort_keys=True, indent=4)
with open(ckpt_qual_name, 'w') as fp1:
json.dump(ckpt_qual_dict, fp1, sort_keys=True, indent=4)
with open(finish_name, 'w') as fp1:
json.dump(finish_dict, fp1, sort_keys=True, indent=4)
with open(K80_time_name, 'w') as fp3:
json.dump(K80_time, fp3, sort_keys=True, indent=4)
with open(V100_time_name, 'w') as fp3:
json.dump(V100_time, fp3, sort_keys=True, indent=4)
with open(ovhd_a_name, 'w') as fp3:
json.dump(ovhd_a, fp3, sort_keys=True, indent=4)
with open(ovhd_b_name, 'w') as fp3:
json.dump(ovhd_b, fp3, sort_keys=True, indent=4)
with open(ovhd_c_name, 'w') as fp3:
json.dump(ovhd_c, fp3, sort_keys=True, indent=4)
with open(ovhd_d_name, 'w') as fp3:
json.dump(ovhd_d, fp3, sort_keys=True, indent=4)
with open(ovhd_total_name, 'w') as fp3:
json.dump(ovhd_total, fp3, sort_keys=True, indent=4)
with open(k80_1st_name, 'w') as fp3:
json.dump(k80_1st, fp3, sort_keys=True, indent=4)
with open(v100_1st_name, 'w') as fp3:
json.dump(v100_1st, fp3, sort_keys=True, indent=4)
with open(speedup_name, 'w') as fp1:
json.dump(speedup_dict, fp1, sort_keys=True, indent=4)
with open(predict_name, 'w') as fp1:
json.dump(predict_dict, fp1, sort_keys=True, indent=4)
with open(demote_list_name, 'w') as fp1:
json.dump(demote_list, fp1, sort_keys=True, indent=4)
with open(completion_name, 'w') as fp1:
json.dump(completion, fp1, sort_keys=True, indent=4)
with open(queue_delay_name, 'w') as fp1:
json.dump(queue_delay, fp1, sort_keys=True, indent=4)
gpu_usage_time = np.asarray(gpu_usage_time)
gpu_usage = np.asarray(gpu_usage)
gpu_usage_completion = np.asarray(gpu_usage_completion)
rows = zip(gpu_usage_time, gpu_usage, gpu_usage_completion)
with open(gpu_usage_name, 'w') as f:
writer = csv.writer(f)
for row in rows:
writer.writerow(row)